For the Keras autoencoder, I referred to the following blog: http://blog.keras.io/building-autoencoders-in-keras.html. In that blog, MNIST is used as the input data.
There are many examples of feature extraction from images such as MNIST, but in this article we will deal with **signals other than images**.
As preprocessing, the input to the autoencoder is measurement data from an acceleration sensor, with values ranging from -8 to +8. The windowed data is stored in the variable windoW, and the number of nodes in the hidden layer is set to eight.
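The preprocessing itself is not shown in this article, so as a reference, here is a minimal sketch of how windoW might be built by slicing the raw acceleration samples into fixed-length windows. The function make_windows, the window length of 128, and the input file name are assumptions for illustration, not part of the original code.

```python
import numpy as np

def make_windows(signal, window_len=128):
    # Slice a 1-D signal into non-overlapping windows of length window_len
    # and stack them into a 2-D array of shape (n_windows, window_len).
    n_windows = len(signal) // window_len
    return signal[:n_windows * window_len].reshape(n_windows, window_len)

# acc_x: raw X-axis acceleration samples, already in the range -8 to +8
acc_x = np.load('sensor1_AccX_raw.npy')  # hypothetical file name
windoW = make_windows(acc_x)
```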
import os
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Dense
from keras.models import Model
from keras.utils.visualize_util import plot  # Keras 1.x; in Keras 2 this is keras.utils.plot_model

def SaveDicDataFromFileNPZ(PATH, name, data):
    # Save the data in NumPy .npz format, creating the directory if it does not exist
    if not os.path.exists(PATH):
        os.makedirs(PATH)
    np.savez(PATH + name, data=data)
# Set the PATH and file names used to store each output
# (StudyDataPath, StudyDataModelPicPath and GlaphDataPath are assumed to be
# defined in the preprocessing step, which is not shown here)
SensorName = 'sensor1'
SaveFileNameEncord = SensorName + '_AccX_encoded'
SaveFileNameDecord = SensorName + '_AccX_decoded'
SaveFileNameNet = SensorName + '_AccX_net'
SaveFileNameTrain = SensorName + '_AccX_train'
SaveFileNameTest = SensorName + '_AccX_test'
SaveFileNameGlaph = GlaphDataPath + SensorName + '_AccX_loss_val_loss.png'
# Use the windowed sensor data for both training and validation
window_test = windoW
window_train = windoW
# Number of nodes in the hidden (encoded) layer
encoding_dim = 8
# Size of the input layer
shapeNum = windoW.shape[0] * windoW.shape[1]
# this is our input placeholder
input_img = Input(shape=(shapeNum,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='tanh')(input_img)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(shapeNum, activation='linear')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input=input_img, output=decoded)
# this model maps an input to its encoded representation
encoder = Model(input=input_img, output=encoded)
# create a placeholder for an encoded (8-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(input=encoded_input, output=decoder_layer(encoded_input))
autoencoder.compile(optimizer='adadelta', loss='mse')
# Save a diagram of the network architecture as an image
plot(autoencoder, to_file=StudyDataModelPicPath+SaveFileNameNet+'.png')
hist = autoencoder.fit(window_train, window_train,
                       nb_epoch=50,
                       batch_size=shapeNum // 4,  # integer batch size
                       shuffle=True,
                       validation_data=(window_test, window_test))
# Save the encoded and decoded outputs
encoded_imgs = encoder.predict(window_test)
decoded_imgs = decoder.predict(encoded_imgs)
# SaveDicDataFromFileNPZ (defined above) is assumed to live in the author's 'processing' helper module
processing.SaveDicDataFromFileNPZ(StudyDataPath, SaveFileNameEncord, encoded_imgs)
processing.SaveDicDataFromFileNPZ(StudyDataPath, SaveFileNameDecord, decoded_imgs)
# Save the network architecture (JSON) and the weights and biases of each layer
json_string = encoder.to_json()
open(StudyDataPath+SaveFileNameEncord+'.json', 'w').write(json_string)
encoder.save_weights(StudyDataPath+SaveFileNameEncord+'_weights.h5')
json_string = decoder.to_json()
open(StudyDataPath+SaveFileNameDecord+'.json', 'w').write(json_string)
decoder.save_weights(StudyDataPath+SaveFileNameDecord+'_weights.h5')
json_string = autoencoder.to_json()
open(StudyDataPath+SaveFileNameNet+'.json', 'w').write(json_string)
autoencoder.save_weights(StudyDataPath+SaveFileNameNet+'_weights.h5')
# Plot the training and validation loss
loss = hist.history['loss']
val_loss = hist.history['val_loss']
nb_epoch = len(loss)
plt.plot(range(nb_epoch), loss, marker='.', label='loss')
plt.plot(range(nb_epoch), val_loss, marker='.', label='val_loss')
plt.legend(loc='best', fontsize=10)
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.savefig(SaveFileNameGlaph)
plt.show()
Running this produces a graph of the training and validation loss like the one shown in the figure below.
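Because the encoder and decoder are saved as JSON architecture files plus HDF5 weights, they can be reloaded later without retraining. Below is a minimal sketch of how the saved encoder might be reused; it assumes the same path variables as above, and new_window is a hypothetical array of freshly preprocessed sensor windows.

```python
from keras.models import model_from_json

# Rebuild the encoder from its saved architecture and weights
encoder = model_from_json(open(StudyDataPath + SaveFileNameEncord + '.json').read())
encoder.load_weights(StudyDataPath + SaveFileNameEncord + '_weights.h5')

# Extract 8-dimensional features from new windowed sensor data (new_window is hypothetical)
features = encoder.predict(new_window)
```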