In [1]:
# Imports — one block at the top of the notebook (third-party only here).
import numpy as np
import matplotlib.pyplot as plt

import keras
from keras import models, layers, datasets, backend
# NOTE(review): `np_utils` is never used below (one-hot encoding uses
# keras.utils.to_categorical instead) and the `keras.utils.np_utils` module
# was removed in modern Keras, where this import raises ImportError.
# Kept only for environments still running old standalone Keras — safe to drop.
from keras.utils import np_utils
# (A duplicate `import numpy as np` was removed from this cell.)
In [2]:
# Download (on first run) and load Fashion-MNIST: X_train is (60000, 28, 28)
# and X_test (10000, 28, 28); labels are integers 0-9. Pixel values are in
# [0, 255] and are rescaled later in the notebook.
(X_train, y_train),(X_test, y_test) = datasets.fashion_mnist.load_data()
In [3]:
# Sanity check: render the first training image (default colormap).
plt.imshow(X_train[0])
Out[3]:
<matplotlib.image.AxesImage at 0x7fcf8dd53a90>
In [4]:
# Training configuration, referenced by the cells below.
num_classes = 10          # Fashion-MNIST has 10 clothing categories
num_batch_size = 100      # mini-batch size for fit() and evaluate()
num_epochs = 50           # full passes over the training set
validation_split = 0.1    # fraction of training data held out for validation
In [5]:
# With a CNN the image is fed in whole rather than flattened to 1-D, so the
# channel axis matters: even a grayscale image needs an explicit channel of 1.
# Two layout conventions exist:
#   1) channels-first: (1, 28, 28)
#   2) channels-last:  (28, 28, 1)
# The raw data here has no channel axis yet (it is added below in the layout
# the backend expects); the active convention is queried via keras.backend
# in the next cell.
# (The original cell held this note as a bare triple-quoted string — a
# runtime no-op statement — converted to real comments.)
print(X_train.shape)       # (60000, 28, 28) — no channel axis yet
print(X_train.shape[1:])   # (28, 28)
img_rows, img_cols = X_train.shape[1:]
(60000, 28, 28)
(28, 28)
In [6]:
# Ask the Keras backend which image layout it uses; returns the string
# 'channels_first' or 'channels_last' (here: 'channels_last', per Out[6]).
channel = backend.image_data_format()
channel  # bare expression so the notebook displays the value
Out[6]:
'channels_last'
In [7]:
# Add the explicit channel axis in whichever position the backend expects.
# BUG FIX: the original wrote `backend.image_data_format == 'chennel_first'`,
# comparing the *function object* (no call parentheses) against a misspelled
# string — always False, so the channels-first branch could never run and the
# data would be mis-shaped on a channels-first backend. It only appeared to
# work because this backend is channels-last.
if backend.image_data_format() == 'channels_first':
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
In [8]:
# Rescale pixels from [0, 255] to [0, 1]. (NumPy true division promotes the
# integer arrays to float64; casting to float32 first would halve memory —
# TODO if memory matters.)
X_train = X_train/255
X_test = X_test/255
# One-hot encode integer labels into (n, num_classes) matrices, as required
# by the categorical_crossentropy loss compiled below.
Y_train = keras.utils.to_categorical(y_train, num_classes)
Y_test = keras.utils.to_categorical(y_test, num_classes)
In [10]:
# CNN architecture: four Conv2D stages, each followed by BatchNorm -> ReLU
# (normalising pre-activations), with two 2x2 max-pools halving the spatial
# size 28 -> 14 -> 7, then a Dense(512) -> Dense(64) head and a softmax over
# the num_classes outputs. Dropout is interleaved for regularisation.
# Built by passing the layer list to Sequential in one go — identical layer
# sequence and hyperparameters to building it with repeated .add() calls.
model = keras.Sequential([
    layers.Conv2D(32, kernel_size=(3,3), padding='same', input_shape=input_shape),
    layers.BatchNormalization(),
    layers.Activation('relu'),

    layers.Conv2D(32, kernel_size=(3,3), padding='same'),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.Dropout(0.2),

    layers.Conv2D(64, kernel_size=(3,3), padding='same'),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.MaxPool2D(pool_size=(2,2)),

    layers.Conv2D(128, kernel_size=(3,3), padding='same'),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.MaxPool2D(pool_size=(2,2)),
    layers.Dropout(0.33),

    layers.Flatten(),
    layers.Dense(512),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.Dropout(0.4),

    layers.Dense(64),
    layers.BatchNormalization(),
    layers.Activation('relu'),

    layers.Dense(num_classes, activation='softmax'),
])
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 28, 28, 32) 320
_________________________________________________________________
batch_normalization (BatchNo (None, 28, 28, 32) 128
_________________________________________________________________
activation (Activation) (None, 28, 28, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 28, 28, 32) 9248
_________________________________________________________________
batch_normalization_1 (Batch (None, 28, 28, 32) 128
_________________________________________________________________
activation_1 (Activation) (None, 28, 28, 32) 0
_________________________________________________________________
dropout (Dropout) (None, 28, 28, 32) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 28, 28, 64) 18496
_________________________________________________________________
batch_normalization_2 (Batch (None, 28, 28, 64) 256
_________________________________________________________________
activation_2 (Activation) (None, 28, 28, 64) 0
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 14, 14, 64) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 14, 14, 128) 73856
_________________________________________________________________
batch_normalization_3 (Batch (None, 14, 14, 128) 512
_________________________________________________________________
activation_3 (Activation) (None, 14, 14, 128) 0
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 7, 7, 128) 0
_________________________________________________________________
dropout_1 (Dropout) (None, 7, 7, 128) 0
_________________________________________________________________
flatten (Flatten) (None, 6272) 0
_________________________________________________________________
dense (Dense) (None, 512) 3211776
_________________________________________________________________
batch_normalization_4 (Batch (None, 512) 2048
_________________________________________________________________
activation_4 (Activation) (None, 512) 0
_________________________________________________________________
dropout_2 (Dropout) (None, 512) 0
_________________________________________________________________
dense_1 (Dense) (None, 64) 32832
_________________________________________________________________
batch_normalization_5 (Batch (None, 64) 256
_________________________________________________________________
activation_5 (Activation) (None, 64) 0
_________________________________________________________________
dense_2 (Dense) (None, 10) 650
=================================================================
Total params: 3,350,506
Trainable params: 3,348,842
Non-trainable params: 1,664
_________________________________________________________________
In [12]:
# Nadam = Adam with Nesterov momentum; lr=0.001 is its documented default.
# The commented-out lines are alternative optimizers that were tried.
optim = keras.optimizers.Nadam(learning_rate=0.001)
# optim = keras.optimizers.Adam(learning_rate=0.0001)
# optim = keras.optimizers.RMSprop()
# categorical_crossentropy matches the one-hot Y_* targets built above;
# 'accuracy' makes evaluate() return [loss, accuracy].
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=optim,
              metrics=['accuracy'])
In [13]:
# Train for num_epochs; validation_split holds out 10% of (X_train, Y_train)
# for per-epoch validation (Keras takes it from the end of the arrays).
# The returned History object is plotted in the cells below.
history = model.fit(X_train, Y_train,
                    batch_size = num_batch_size,
                    epochs = num_epochs,
                    validation_split = validation_split)
Epoch 1/50
540/540 [==============================] - 15s 21ms/step - loss: 0.5714 - accuracy: 0.8117 - val_loss: 0.3912 - val_accuracy: 0.8562
Epoch 2/50
540/540 [==============================] - 11s 20ms/step - loss: 0.2588 - accuracy: 0.9049 - val_loss: 0.2264 - val_accuracy: 0.9200
Epoch 3/50
540/540 [==============================] - 11s 20ms/step - loss: 0.2191 - accuracy: 0.9193 - val_loss: 0.2058 - val_accuracy: 0.9267
Epoch 4/50
540/540 [==============================] - 11s 20ms/step - loss: 0.1870 - accuracy: 0.9316 - val_loss: 0.2036 - val_accuracy: 0.9265
Epoch 5/50
540/540 [==============================] - 11s 20ms/step - loss: 0.1682 - accuracy: 0.9388 - val_loss: 0.1791 - val_accuracy: 0.9377
Epoch 6/50
540/540 [==============================] - 11s 20ms/step - loss: 0.1549 - accuracy: 0.9432 - val_loss: 0.2043 - val_accuracy: 0.9203
Epoch 7/50
540/540 [==============================] - 11s 20ms/step - loss: 0.1463 - accuracy: 0.9461 - val_loss: 0.1751 - val_accuracy: 0.9375
Epoch 8/50
540/540 [==============================] - 11s 20ms/step - loss: 0.1310 - accuracy: 0.9528 - val_loss: 0.1710 - val_accuracy: 0.9387
Epoch 9/50
540/540 [==============================] - 11s 20ms/step - loss: 0.1235 - accuracy: 0.9548 - val_loss: 0.1758 - val_accuracy: 0.9360
Epoch 10/50
540/540 [==============================] - 11s 20ms/step - loss: 0.1141 - accuracy: 0.9583 - val_loss: 0.1665 - val_accuracy: 0.9402
Epoch 11/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0981 - accuracy: 0.9623 - val_loss: 0.1680 - val_accuracy: 0.9403
Epoch 12/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0932 - accuracy: 0.9658 - val_loss: 0.1774 - val_accuracy: 0.9403
Epoch 13/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0873 - accuracy: 0.9687 - val_loss: 0.1617 - val_accuracy: 0.9428
Epoch 14/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0762 - accuracy: 0.9719 - val_loss: 0.1780 - val_accuracy: 0.9417
Epoch 15/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0731 - accuracy: 0.9737 - val_loss: 0.1884 - val_accuracy: 0.9382
Epoch 16/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0636 - accuracy: 0.9760 - val_loss: 0.1797 - val_accuracy: 0.9438
Epoch 17/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0606 - accuracy: 0.9773 - val_loss: 0.1940 - val_accuracy: 0.9438
Epoch 18/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0597 - accuracy: 0.9775 - val_loss: 0.1862 - val_accuracy: 0.9443
Epoch 19/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0561 - accuracy: 0.9788 - val_loss: 0.1765 - val_accuracy: 0.9432
Epoch 20/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0506 - accuracy: 0.9812 - val_loss: 0.1968 - val_accuracy: 0.9420
Epoch 21/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0490 - accuracy: 0.9819 - val_loss: 0.1867 - val_accuracy: 0.9438
Epoch 22/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0443 - accuracy: 0.9835 - val_loss: 0.1985 - val_accuracy: 0.9438
Epoch 23/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0406 - accuracy: 0.9849 - val_loss: 0.1996 - val_accuracy: 0.9435
Epoch 24/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0409 - accuracy: 0.9853 - val_loss: 0.1945 - val_accuracy: 0.9465
Epoch 25/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0408 - accuracy: 0.9854 - val_loss: 0.2067 - val_accuracy: 0.9410
Epoch 26/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0401 - accuracy: 0.9856 - val_loss: 0.2077 - val_accuracy: 0.9445
Epoch 27/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0352 - accuracy: 0.9868 - val_loss: 0.2337 - val_accuracy: 0.9425
Epoch 28/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0364 - accuracy: 0.9866 - val_loss: 0.2146 - val_accuracy: 0.9437
Epoch 29/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0344 - accuracy: 0.9876 - val_loss: 0.1954 - val_accuracy: 0.9455
Epoch 30/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0305 - accuracy: 0.9889 - val_loss: 0.2147 - val_accuracy: 0.9450
Epoch 31/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0319 - accuracy: 0.9893 - val_loss: 0.2040 - val_accuracy: 0.9438
Epoch 32/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0306 - accuracy: 0.9887 - val_loss: 0.2120 - val_accuracy: 0.9450
Epoch 33/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0303 - accuracy: 0.9890 - val_loss: 0.2089 - val_accuracy: 0.9443
Epoch 34/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0273 - accuracy: 0.9900 - val_loss: 0.2155 - val_accuracy: 0.9467
Epoch 35/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0294 - accuracy: 0.9899 - val_loss: 0.2173 - val_accuracy: 0.9437
Epoch 36/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0254 - accuracy: 0.9916 - val_loss: 0.2196 - val_accuracy: 0.9450
Epoch 37/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0266 - accuracy: 0.9904 - val_loss: 0.2251 - val_accuracy: 0.9442
Epoch 38/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0278 - accuracy: 0.9900 - val_loss: 0.2178 - val_accuracy: 0.9465
Epoch 39/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0239 - accuracy: 0.9918 - val_loss: 0.2104 - val_accuracy: 0.9483
Epoch 40/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0232 - accuracy: 0.9916 - val_loss: 0.2243 - val_accuracy: 0.9448
Epoch 41/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0245 - accuracy: 0.9909 - val_loss: 0.2404 - val_accuracy: 0.9387
Epoch 42/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0235 - accuracy: 0.9912 - val_loss: 0.2255 - val_accuracy: 0.9432
Epoch 43/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0212 - accuracy: 0.9924 - val_loss: 0.2766 - val_accuracy: 0.9413
Epoch 44/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0226 - accuracy: 0.9918 - val_loss: 0.2276 - val_accuracy: 0.9445
Epoch 45/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0224 - accuracy: 0.9920 - val_loss: 0.2348 - val_accuracy: 0.9417
Epoch 46/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0220 - accuracy: 0.9918 - val_loss: 0.2741 - val_accuracy: 0.9397
Epoch 47/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0228 - accuracy: 0.9922 - val_loss: 0.2434 - val_accuracy: 0.9462
Epoch 48/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0204 - accuracy: 0.9926 - val_loss: 0.2426 - val_accuracy: 0.9472
Epoch 49/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0207 - accuracy: 0.9930 - val_loss: 0.2226 - val_accuracy: 0.9443
Epoch 50/50
540/540 [==============================] - 11s 20ms/step - loss: 0.0185 - accuracy: 0.9934 - val_loss: 0.2319 - val_accuracy: 0.9480
In [14]:
# Train/validation curve plotting. The original plot_loss/plot_acc were
# copy-paste duplicates differing only in keys, labels, and filename; the
# shared logic is factored into one private helper.
def _plot_history_metric(history, train_key, val_key, title, ylabel, out_path):
    """Plot a train/validation series from a Keras History and save it as PNG.

    Parameters:
        history: Keras History object (exposes a .history dict of per-epoch lists)
        train_key, val_key: keys into history.history for the two series
        title, ylabel: figure title and y-axis label (x-axis is always 'Epoch')
        out_path: filename passed to plt.savefig (saved at dpi=300, then shown)
    """
    plt.plot(history.history[train_key])
    plt.plot(history.history[val_key])
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel('Epoch')  # we care how the metric changes as epochs increase
    plt.legend(['Train','Validation'])
    plt.savefig(out_path, dpi=300)  # dpi controls the saved resolution
    plt.show()

def plot_loss(history):
    """Plot train/validation loss per epoch; saves the figure to Loss_Plot.png."""
    _plot_history_metric(history, 'loss', 'val_loss',
                         'model Loss', 'Loss', 'Loss_Plot.png')

def plot_acc(history):
    """Plot train/validation accuracy per epoch; saves the figure to Acc_Plot.png."""
    _plot_history_metric(history, 'accuracy', 'val_accuracy',
                         'Model Accuracy', 'Accuracy', 'Acc_Plot.png')
In [15]:
plot_loss(history)  # train vs. validation loss curve (also saved to Loss_Plot.png)
In [16]:
plot_acc(history)  # train vs. validation accuracy curve (also saved to Acc_Plot.png)
In [17]:
# Final evaluation on the held-out test set; evaluate() returns
# [loss, accuracy] in the order set up by compile(metrics=['accuracy']).
model_performance = model.evaluate(X_test,Y_test,batch_size=num_batch_size)
print(f'loss:{model_performance[0]}, accuracy:{model_performance[1]}')
print(*model_performance)  # same numbers unlabeled — redundant with the line above
100/100 [==============================] - 1s 6ms/step - loss: 0.2717 - accuracy: 0.9442
loss:0.27170076966285706, accuracy:0.9441999793052673
0.27170076966285706 0.9441999793052673
'workSpace > PYTHON' 카테고리의 다른 글
[DL] get CIFAR10 Custom Datasets (0) | 2021.02.03 |
---|---|
[DL] MNIST analysis with CNN model using keras ( Accuracy 98.94 % ) (0) | 2021.02.03 |
[DL] CIFAR10 with CNN (0) | 2021.02.03 |
[DL] Warming up (0) | 2021.02.02 |
[PyTorch] WHAT is Frame work and WHY PyTorch (0) | 2021.02.02 |