CNN: first, a brief review.
A model is defined with Keras's Sequential API as follows.
model = Sequential()
model.add(Layer 1)
model.add(Layer 2)
︙
Each layer is defined as in the examples below.
# Fully connected layer (units is the number of outputs; input_dim is needed only on the first layer)
Dense(units, input_dim=784)
# Convolution layer (padding="same" keeps the spatial size unchanged)
Conv2D(filters=32, kernel_size=(2, 2), strides=(1, 1), padding="same", input_shape=(28, 28, 3))
# Pooling layer (strides=None defaults to pool_size)
MaxPooling2D(pool_size=(2, 2), strides=None)
# Flattening layer (converts the feature maps into a 1-D vector)
Flatten()
An activation function is added in the same way as a layer, as in the following example.
model.add(Activation('sigmoid'))
Strings given to Activation include 'sigmoid', 'relu', and 'softmax'.
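Equivalently, the activation can be given as an argument to the layer itself; the two forms below behave the same.
# Activation as a separate layer
model.add(Dense(128))
model.add(Activation('relu'))
# Equivalent: activation given as a layer argument
model.add(Dense(128, activation='relu'))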
Putting this together, a full model definition looks like this.
from keras.utils.vis_utils import plot_model
from keras.layers import Activation, Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential
#---------------------------
model = Sequential()
model.add(Conv2D(input_shape=(32, 32, 3), filters=32, kernel_size=(2, 2), strides=(1, 1), padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=32, kernel_size=(2, 2), strides=(1, 1), padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('sigmoid'))
model.add(Dense(128))
model.add(Activation('sigmoid'))
model.add(Dense(10))
model.add(Activation('softmax'))
#---------------------------
#Output the structure of the model
model.summary()
After defining the model, compile it by specifying the optimizer (which determines how the learning rate is adapted), the loss function, and the accuracy metrics to display.
model.compile(optimizer='sgd', loss='mean_squared_error', metrics=['accuracy'])
The training of the model is performed as follows.
model.fit(X_train, y_train, batch_size=32, epochs=10)
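fit can also hold out part of the training data for validation and returns a history object; a minimal sketch (the 10% split here is just an illustrative choice):
# Hold out 10% of the training data for validation and keep the learning curve
history = model.fit(X_train, y_train, batch_size=32, epochs=10, validation_split=0.1)
print(history.history.keys())  # e.g. 'loss', 'acc', 'val_loss', 'val_acc' (key names vary by Keras version)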
Prediction with the model is done as follows. Note that predict expects a batch of inputs, so a single image needs a leading batch dimension.
# predict() expects a batch, so give the single image a leading batch dimension
pred = np.argmax(model.predict(data[0].reshape(1, 32, 32, 3)))
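For several images at once, predict on the whole batch and take the argmax along the class axis (as also done in the final example below).
# Predict a batch of images and take the most probable class for each
preds = np.argmax(model.predict(X_test[:10]), axis=1)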
A complete code example:
import matplotlib.pyplot as plt
import numpy as np
from keras.utils.vis_utils import plot_model
from keras import optimizers
from keras.applications.vgg16 import VGG16
from keras.datasets import cifar10
from keras.layers import Dense, Dropout, Flatten, Input, Conv2D, MaxPooling2D, Activation
from keras.models import Model, Sequential
from keras.utils.np_utils import to_categorical
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train[:1000]
X_test = X_test[:1000]
y_train = to_categorical(y_train)[:1000]
y_test = to_categorical(y_test)[:1000]
model = Sequential()
model.add(Conv2D(input_shape=(32, 32, 3), filters=32, kernel_size=(2, 2), strides=(1, 1), padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=32, kernel_size=(2, 2), strides=(1, 1), padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('sigmoid'))
model.add(Dense(128))
model.add(Activation('sigmoid'))
model.add(Dense(10))
model.add(Activation('softmax'))
#---------------------------
model.compile(optimizer='sgd', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=100, epochs=1)
#---------------------------
for i in range(1):
    x = X_test[i]
    plt.imshow(x)
    plt.show()
    pred = np.argmax(model.predict(x.reshape(1, 32, 32, 3)))
    print(pred)
Here, transfer learning is performed using VGG16. First, create an instance of the VGG16 model.
from keras.applications.vgg16 import VGG16
input_tensor = Input(shape=(32, 32, 3))
vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)
The shape of the input is given as input_tensor.
Setting include_top to False uses only the feature-extraction part of VGG16, so that your own model can be attached after it.
To add layers after the feature-extraction part, define a separate model in advance (top_model here) and combine the two as follows.
top_model = Sequential()
top_model.add(Flatten(input_shape=vgg16.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dense(10, activation='softmax'))
# The input is vgg16.input; the output is top_model applied to vgg16's output
model = Model(inputs=vgg16.input, outputs=top_model(vgg16.output))
If the weights of the VGG16 feature-extraction layers were updated during training, the pretrained features would be destroyed, so freeze them as follows.
# Freeze the first 15 layers, which come from VGG16 (the block5 layers stay trainable for fine-tuning)
for layer in model.layers[:15]:
    layer.trainable = False
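As a quick sanity check, you can list each layer's name together with its trainable flag to confirm exactly which layers are frozen.
# Confirm which layers are frozen
for layer in model.layers:
    print(layer.name, layer.trainable)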
You can compile and train in the same way, but for fine-tuning the optimizer should be SGD with a small learning rate.
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])
A complete code example:
import matplotlib.pyplot as plt
import numpy as np
from keras.utils.vis_utils import plot_model
from keras import optimizers
from keras.applications.vgg16 import VGG16
from keras.datasets import cifar10
from keras.layers import Dense, Dropout, Flatten, Input
from keras.models import Model, Sequential
from keras.utils.np_utils import to_categorical
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train[:1000]
X_test = X_test[:1000]
y_train = to_categorical(y_train)[:1000]
y_test = to_categorical(y_test)[:1000]
#Instantiation of vgg16
#---------------------------
input_tensor = Input(shape=(32, 32, 3))
vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)
#---------------------------
top_model = Sequential()
top_model.add(Flatten(input_shape=vgg16.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dense(10, activation='softmax'))
#Model concatenation
#---------------------------
model = Model(inputs=vgg16.input, outputs=top_model(vgg16.output))
#---------------------------
# Freeze the weights of vgg16
#---------------------------
for layer in model.layers[:15]:
    layer.trainable = False
#---------------------------
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=100, epochs=1)
for i in range(1):
    x = X_test[i]
    plt.imshow(x)
    plt.show()
    pred = np.argmax(model.predict(x.reshape(1, 32, 32, 3)))
    print(pred)
When training a neural network, the model often overfits, learning patterns that apply only to the training data.
Here, a technique called dropout, in which inputs are randomly dropped (overwritten with 0) during training, is used to prevent overfitting.
Dropout makes the network less dependent on the presence of any specific neuron, so it learns more general features that do not rely solely on the training data.
Dropout is added in the same way as a regular layer.
model.add(Dropout(rate=0.5))
rate is the fraction of input units to drop (overwrite with 0).
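As a minimal sketch of what Dropout does, using the backend learning-phase flag of the same standalone Keras used throughout this post: during training roughly rate of the inputs are zeroed and the rest are scaled by 1/(1 - rate), while at test time the layer is the identity.
import numpy as np
from keras import backend as K
from keras.layers import Dropout, Input

inp = Input(shape=(10,))
out = Dropout(rate=0.5)(inp)
f = K.function([inp, K.learning_phase()], [out])

x = np.ones((1, 10))
print(f([x, 1])[0])  # training phase: roughly half the entries are 0, the rest are scaled to 2.0
print(f([x, 0])[0])  # test phase: the input passes through unchanged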
You can also check the classification accuracy of a model in Keras as follows.
score = model.evaluate(X_test, y_test)
X_test and y_test are the input data and labels of the evaluation set.
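evaluate returns the loss followed by each compiled metric, so with metrics=['accuracy'] the result can be read as:
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])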
A complete code example:
import matplotlib.pyplot as plt
import numpy as np
from keras.utils.vis_utils import plot_model
from keras import optimizers
from keras.applications.vgg16 import VGG16
from keras.datasets import cifar10
from keras.layers import Dense, Dropout, Flatten, Input
from keras.models import Model, Sequential
from keras.utils.np_utils import to_categorical
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train[:1000]
X_test = X_test[:1000]
y_train = to_categorical(y_train)[:1000]
y_test = to_categorical(y_test)[:1000]
#Instantiation of vgg16
input_tensor = Input(shape=(32, 32, 3))
vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)
top_model = Sequential()
top_model.add(Flatten(input_shape=vgg16.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
# Dropout
#---------------------------
top_model.add(Dropout(rate=0.5))
#---------------------------
top_model.add(Dense(10, activation='softmax'))
#Model concatenation
model = Model(inputs=vgg16.input, outputs=top_model(vgg16.output))
# Freeze the weights of vgg16
for layer in model.layers[:15]:
    layer.trainable = False
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])
model.summary()
# Record the training history
history = model.fit(X_train, y_train, batch_size=100, epochs=1, validation_data=(X_test, y_test))
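The returned history can then be plotted to compare training and validation accuracy (assuming the old Keras metric keys 'acc'/'val_acc'; newer versions use 'accuracy'/'val_accuracy').
# Plot the learning curve recorded by fit()
plt.plot(history.history['acc'], label='train')
plt.plot(history.history['val_acc'], label='validation')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()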
Finally, let's implement a CIFAR-10 classification model with a CNN.
import keras
from keras.datasets import cifar10
from keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.models import Sequential, load_model
from keras.utils.np_utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
#Data loading
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
#Model definition
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
#compile
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
model.summary()
#Learning
model.fit(X_train, y_train, batch_size=32, epochs=1)
#Evaluation of accuracy
scores = model.evaluate(X_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# Visualize the first 10 test images
for i in range(10):
    plt.subplot(1, 10, i + 1)
    plt.imshow(X_test[i])
plt.show()
# Predict the classes of the first 10 test images
pred = np.argmax(model.predict(X_test[0:10]), axis=1)
print(pred)
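To compare the predictions with the ground truth, take the argmax of the one-hot test labels as well.
# True classes of the same 10 images
true_labels = np.argmax(y_test[0:10], axis=1)
print(true_labels)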