As the title says, I tried TensorFlow for the first time and struggled to set up the environment, so I decided to write it down. This is my first Qiita post, so the explanation may not be great, but thank you for reading.
Here is a rough overview of the environment I built.
Download the Graphical Installer from the Anaconda3 download page, choosing the OS and bit version that match your environment.
For the installation options and checkboxes, you can simply follow the installer's guide.
After installation, launch the Anaconda Prompt.
To install tensorflow-gpu 2.3.0, download the package for your environment from the list in the TensorFlow pip install guide.
After downloading the TensorFlow package file, install it by typing pip install --upgrade [downloaded file] at the Anaconda Prompt.
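If you want to make sure the installation worked, one simple check (assuming the install above succeeded) is to start Python from the Anaconda Prompt and print the TensorFlow version:
python
# Quick sanity check: TensorFlow should import and report the installed version
import tensorflow as tf
print(tf.__version__)  # should show 2.3.0 if that is the version you installed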
From here on is for people who want to use an NVIDIA GPU.
According to the article I referred to, TensorFlow 2.3 works with CUDA 10.1 and cuDNN 7.6, and this combination worked in my environment as well.
Please install the CUDA and cuDNN versions that match your version of TensorFlow.
-Note on the version of CUDA, cuDNN where tensorflow-gpu works
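Once CUDA and cuDNN are in place, a quick way to check whether TensorFlow can actually see the GPU (a minimal check, not part of the tutorial code) is to list the physical GPU devices; an empty list usually means the CUDA/cuDNN setup was not found:
python
# List the GPUs visible to TensorFlow; an empty list means no GPU was detected
import tensorflow as tf
print(tf.config.list_physical_devices('GPU'))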
Also, in order to use the GPU with TensorFlow, it seems you have to configure how GPU memory is allocated. To do this, write the following code at the beginning of your script.
python
physical_devices = tf.config.list_physical_devices('GPU')
if len(physical_devices) > 0:
    for device in physical_devices:
        tf.config.experimental.set_memory_growth(device, True)
        print('{} memory growth: {}'.format(device, tf.config.experimental.get_memory_growth(device)))
else:
    print("Not enough GPU hardware devices available")
How this is written depends on the TensorFlow version, so please refer to the article I used as a reference.
-How to reduce GPU memory usage with tensorflow2.0 + keras
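For reference, in TensorFlow 1.x the same memory-growth behavior was configured through the Session config instead of tf.config. This is just a rough sketch for comparison and is not needed with TensorFlow 2.x:
python
# TensorFlow 1.x style (for reference only; not needed in TensorFlow 2.x)
import tensorflow as tf

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand instead of all at once
sess = tf.Session(config=config)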
Let's try the image classification with a neural network from the TensorFlow tutorial. I've added the code above so that it runs on the GPU.
newralnet_demo.py
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Import helper libraries
import numpy as np
import matplotlib.pyplot as plt
# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# print(tf.__version__)
# GPU_settings
physical_devices = tf.config.list_physical_devices('GPU')
if len(physical_devices) > 0:
    for device in physical_devices:
        tf.config.experimental.set_memory_growth(device, True)
        print('{} memory growth: {}'.format(
            device, tf.config.experimental.get_memory_growth(device)))
else:
    print("Not enough GPU hardware devices available")
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
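# Check the data; these bare expressions only display values in an interactive session such as Jupyter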
train_images.shape
len(train_labels)
train_labels
test_images.shape
len(test_labels)
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
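# Scale the pixel values from the 0-255 range down to 0-1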
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
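# Build the model: flatten the 28x28 images, one 128-unit ReLU hidden layer, 10-way softmax output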
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
predictions = model.predict(test_images)
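# Inspect the first prediction; these bare expressions only display values in an interactive session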
predictions[0]
np.argmax(predictions[0])
test_labels[0]
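# Helper: show an image with its predicted label, confidence, and true label (blue = correct, red = wrong)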
def plot_image(i, predictions_array, true_label, img):
    predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)
    predicted_label = np.argmax(predictions_array)
    if predicted_label == true_label:
        color = 'blue'
    else:
        color = 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                         100*np.max(predictions_array),
                                         class_names[true_label]),
               color=color)
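# Helper: bar chart of the predicted probabilities for all 10 classes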
def plot_value_array(i, predictions_array, true_label):
    predictions_array, true_label = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')
i = 0
plt.figure(figsize=(6, 3))
plt.subplot(1, 2, 1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1, 2, 2)
plot_value_array(i, predictions, test_labels)
plt.show()
i = 12
plt.figure(figsize=(6, 3))
plt.subplot(1, 2, 1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1, 2, 2)
plot_value_array(i, predictions, test_labels)
plt.show()
# Show X test images with their predicted and correct labels.
# Correct predictions are shown in blue and wrong predictions in red.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions, test_labels, test_images)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions, test_labels)
plt.show()
# Extract one image from the test dataset
img = test_images[0]
print(img.shape)
# Add the image to a batch where it is the only member
img = (np.expand_dims(img, 0))
print(img.shape)
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
np.argmax(predictions_single[0])
If everything runs successfully, the test images are classified and displayed in a grid along with their predicted labels.
I referred to the website of Kunihiko Kaneko Lab. It was very helpful. Thank you very much.
-First Neural Network: Introduction to Classification Problems
-Note on the version of CUDA, cuDNN where tensorflow-gpu works
-Install TensorFlow 2.2 (GPU compatible) (on Windows)
-How to reduce GPU memory usage with tensorflow2.0 + keras