The paper that introduced this convolutional neural network obtained state-of-the-art performance at object recognition on the CIFAR-10 image dataset in 2015.

Required Packages

import sys
import tensorflow as tf
from tensorflow.keras import utils

import numpy as np
import pandas as pd

import matplotlib as mpl
import matplotlib.pyplot as plt
from PIL import Image

# Default every figure to an 8x8 inch canvas.
plt.rc('figure', figsize=(8, 8))

Version check

# Report interpreter and key library versions for reproducibility.
print(f'Python: {sys.version}')
print(f'Numpy: {np.__version__}')
print(f'Pandas: {pd.__version__}')
print(f'Tensorflow: {tf.__version__}')
Python: 3.7.6 (default, Jan  8 2020, 20:23:39) [MSC v.1916 64 bit (AMD64)]
Numpy: 1.18.1
Pandas: 1.0.1
Tensorflow: 2.1.0

Prepare dataset

The original data is available from the official CIFAR-10 dataset page.

from tensorflow.keras.datasets import cifar10

# Fetch CIFAR-10 (downloads on first use) as train/test image-label pairs.
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# Human-readable class names, indexed by the integer label id.
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
print(f"Training Images: {X_train.shape}")
print(f"Test images: {X_test.shape}")
Training Images: (50000, 32, 32, 3)
Test images: (10000, 32, 32, 3)
# Shape of a single image: 32x32 pixels, 3 color channels.
print(X_train[0].shape)
(32, 32, 3)
# Show the first nine training images in a 3x3 grid, titled by class name.
for idx in range(9):
    plt.subplot(3, 3, idx + 1)
    plt.imshow(X_train[idx])
    plt.title(labels[int(y_train[idx][0])])
    plt.axis('off')
plt.tight_layout()

Preprocess Data

# Fix the RNG seed so any randomized steps are reproducible.
seed = 6
np.random.seed(seed)

# Scale pixel intensities from the 0-255 byte range into [0.0, 1.0].
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0
# Labels arrive as a column vector of integer class ids, e.g. [6].
print(y_train.shape)
print(y_train[0])
(50000, 1)
[6]
# One-hot encode the integer class ids: shape (N, 1) -> (N, 10).
y_train = utils.to_categorical(y_train)
y_test = utils.to_categorical(y_test)
# Number of target classes, inferred from the encoded label width.
num_class = y_test.shape[1]

print(y_train.shape)
print(y_train[0])
(50000, 10)
[0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]

Building the All-CNN model

In this project, we'll implement the All-CNN-C model.

  • Input - 32 x 32 RGB image
  • Layer Architecture
    • 3x3 conv. 96 ReLU
    • 3x3 conv. 96 ReLU
    • 3x3 max-pooling stride 2
    • 3x3 conv. 192 ReLU
    • 3x3 conv. 192 ReLU
    • 3x3 max-pooling stride 2
    • 3x3 conv. 192 ReLU
    • 1x1 conv. 192 ReLU
    • 1x1 conv. 10 ReLU
    • global averaging over 6x6 spatial dimensions
    • 10-way softmax
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dropout, Conv2D, Dense, GlobalAvgPool2D, MaxPool2D
from tensorflow.keras.optimizers import SGD
def allcnn(weights=None):
    """Build the All-CNN style network for 32x32x3 CIFAR-10 images.

    Parameters
    ----------
    weights : str, optional
        Path to an HDF5 weights file. When given, the saved weights are
        loaded into the freshly built model.

    Returns
    -------
    An uncompiled Keras ``Sequential`` model ending in a 10-way softmax.
    """
    layers = [
        # Stage 1: two 3x3 convolutions at 96 filters, then downsample.
        Conv2D(96, kernel_size=(3, 3), padding='same', input_shape=(32, 32, 3), activation='relu'),
        Conv2D(96, kernel_size=(3, 3), padding='same', activation='relu'),
        MaxPool2D(pool_size=(3, 3), padding='same', strides=(2, 2)),
        Dropout(0.5),
        # Stage 2: two 3x3 convolutions at 192 filters, then downsample.
        Conv2D(192, kernel_size=(3, 3), padding='same', activation='relu'),
        Conv2D(192, kernel_size=(3, 3), padding='same', activation='relu'),
        MaxPool2D(pool_size=(3, 3), padding='same', strides=(2, 2)),
        Dropout(0.5),
        # Head: 3x3 conv, two 1x1 convs down to 10 channels, then
        # global average pooling and the softmax classifier.
        Conv2D(192, kernel_size=(3, 3), padding='same', activation='relu'),
        Conv2D(192, kernel_size=(1, 1), padding='same', activation='relu'),
        Conv2D(10, kernel_size=(1, 1), padding='same', activation='relu'),
        GlobalAvgPool2D(),
        Dense(10, activation='softmax'),
    ]

    model = Sequential(layers)

    # Optionally restore previously trained weights from disk.
    if weights:
        model.load_weights(weights)

    return model
learning_rate = 0.01
weight_decay = 1e-6  # NOTE(review): passed as SGD `decay`, which in Keras is learning-rate decay, not true weight decay — the variable name is misleading
momentum = 0.9

# Parameter Grid Search
model = allcnn()

# Define optimizer and compile model
sgd = SGD(learning_rate=learning_rate, decay=weight_decay, momentum=momentum, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

print(model.summary())
Model: "sequential_8"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_39 (Conv2D)           (None, 32, 32, 96)        2688      
_________________________________________________________________
conv2d_40 (Conv2D)           (None, 32, 32, 96)        83040     
_________________________________________________________________
max_pooling2d_10 (MaxPooling (None, 16, 16, 96)        0         
_________________________________________________________________
dropout_10 (Dropout)         (None, 16, 16, 96)        0         
_________________________________________________________________
conv2d_41 (Conv2D)           (None, 16, 16, 192)       166080    
_________________________________________________________________
conv2d_42 (Conv2D)           (None, 16, 16, 192)       331968    
_________________________________________________________________
max_pooling2d_11 (MaxPooling (None, 8, 8, 192)         0         
_________________________________________________________________
dropout_11 (Dropout)         (None, 8, 8, 192)         0         
_________________________________________________________________
conv2d_43 (Conv2D)           (None, 8, 8, 192)         331968    
_________________________________________________________________
conv2d_44 (Conv2D)           (None, 8, 8, 192)         37056     
_________________________________________________________________
conv2d_45 (Conv2D)           (None, 8, 8, 10)          1930      
_________________________________________________________________
global_average_pooling2d_4 ( (None, 10)                0         
_________________________________________________________________
dense_4 (Dense)              (None, 10)                110       
=================================================================
Total params: 954,840
Trainable params: 954,840
Non-trainable params: 0
_________________________________________________________________
None
from tensorflow.keras.callbacks import ModelCheckpoint

# Save weights only when validation loss improves, so 'best_weights.hdf5'
# always holds the best epoch seen so far (loaded again in the
# "Model Inference" section below).
checkpoint = ModelCheckpoint('best_weights.hdf5', monitor='val_loss', save_best_only=True)

# Define additional training parameters
epochs = 100
batch_size = 32

# Fit the model
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, 
          batch_size=batch_size, verbose=1, callbacks=[checkpoint])
Train on 50000 samples, validate on 10000 samples
Epoch 1/100
50000/50000 [==============================] - 11s 218us/sample - loss: 1.9512 - accuracy: 0.2375 - val_loss: 1.7935 - val_accuracy: 0.3067
Epoch 2/100
50000/50000 [==============================] - 11s 211us/sample - loss: 1.5540 - accuracy: 0.4074 - val_loss: 1.3599 - val_accuracy: 0.4934
Epoch 3/100
50000/50000 [==============================] - 11s 216us/sample - loss: 1.2937 - accuracy: 0.5223 - val_loss: 1.1712 - val_accuracy: 0.5708
Epoch 4/100
50000/50000 [==============================] - 11s 217us/sample - loss: 1.1107 - accuracy: 0.5983 - val_loss: 1.0279 - val_accuracy: 0.6302
Epoch 5/100
50000/50000 [==============================] - 11s 213us/sample - loss: 0.9708 - accuracy: 0.6483 - val_loss: 0.8836 - val_accuracy: 0.6837
Epoch 6/100
50000/50000 [==============================] - 11s 213us/sample - loss: 0.8706 - accuracy: 0.6919 - val_loss: 0.8344 - val_accuracy: 0.7038
Epoch 7/100
50000/50000 [==============================] - 11s 214us/sample - loss: 0.7842 - accuracy: 0.7222 - val_loss: 0.8831 - val_accuracy: 0.6912
Epoch 8/100
50000/50000 [==============================] - 11s 218us/sample - loss: 0.7187 - accuracy: 0.7469 - val_loss: 0.7148 - val_accuracy: 0.7493
Epoch 9/100
50000/50000 [==============================] - 11s 212us/sample - loss: 0.6577 - accuracy: 0.7688 - val_loss: 0.6144 - val_accuracy: 0.7871
Epoch 10/100
50000/50000 [==============================] - 11s 212us/sample - loss: 0.6105 - accuracy: 0.7864 - val_loss: 0.6142 - val_accuracy: 0.7877
Epoch 11/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.5741 - accuracy: 0.7990 - val_loss: 0.5857 - val_accuracy: 0.8023
Epoch 12/100
50000/50000 [==============================] - 11s 211us/sample - loss: 0.5434 - accuracy: 0.8094 - val_loss: 0.5582 - val_accuracy: 0.8128
Epoch 13/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.5121 - accuracy: 0.8215 - val_loss: 0.5465 - val_accuracy: 0.8111
Epoch 14/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.4844 - accuracy: 0.8303 - val_loss: 0.5103 - val_accuracy: 0.8259
Epoch 15/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.4639 - accuracy: 0.8381 - val_loss: 0.5382 - val_accuracy: 0.8232
Epoch 16/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.4445 - accuracy: 0.8436 - val_loss: 0.5216 - val_accuracy: 0.8292
Epoch 17/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.4258 - accuracy: 0.8507 - val_loss: 0.4929 - val_accuracy: 0.8363
Epoch 18/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.4068 - accuracy: 0.8563 - val_loss: 0.5049 - val_accuracy: 0.8325
Epoch 19/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.3917 - accuracy: 0.8616 - val_loss: 0.5115 - val_accuracy: 0.8349
Epoch 20/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.3772 - accuracy: 0.8677 - val_loss: 0.4758 - val_accuracy: 0.8449
Epoch 21/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.3665 - accuracy: 0.8701 - val_loss: 0.4788 - val_accuracy: 0.8431
Epoch 22/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.3534 - accuracy: 0.8752 - val_loss: 0.4976 - val_accuracy: 0.8393
Epoch 23/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.3461 - accuracy: 0.8770 - val_loss: 0.4735 - val_accuracy: 0.8452
Epoch 24/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.3298 - accuracy: 0.8828 - val_loss: 0.4889 - val_accuracy: 0.8456
Epoch 25/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.3220 - accuracy: 0.8862 - val_loss: 0.4937 - val_accuracy: 0.8445
Epoch 26/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.3155 - accuracy: 0.8882 - val_loss: 0.4512 - val_accuracy: 0.8530
Epoch 27/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.3057 - accuracy: 0.8913 - val_loss: 0.5041 - val_accuracy: 0.8447
Epoch 28/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.2955 - accuracy: 0.8951 - val_loss: 0.5083 - val_accuracy: 0.8474
Epoch 29/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.2910 - accuracy: 0.8964 - val_loss: 0.5262 - val_accuracy: 0.8432
Epoch 30/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.2867 - accuracy: 0.8974 - val_loss: 0.4747 - val_accuracy: 0.8564
Epoch 31/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.2738 - accuracy: 0.9029 - val_loss: 0.4954 - val_accuracy: 0.8509
Epoch 32/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.2679 - accuracy: 0.9047 - val_loss: 0.4716 - val_accuracy: 0.8597
Epoch 33/100
50000/50000 [==============================] - 10s 209us/sample - loss: 0.2643 - accuracy: 0.9060 - val_loss: 0.5434 - val_accuracy: 0.8457
Epoch 34/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.2567 - accuracy: 0.9099 - val_loss: 0.5056 - val_accuracy: 0.8520
Epoch 35/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.2556 - accuracy: 0.9080 - val_loss: 0.5048 - val_accuracy: 0.8489
Epoch 36/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.2483 - accuracy: 0.9117 - val_loss: 0.4827 - val_accuracy: 0.8579
Epoch 37/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.2428 - accuracy: 0.9151 - val_loss: 0.5077 - val_accuracy: 0.8509
Epoch 38/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.2391 - accuracy: 0.9150 - val_loss: 0.5151 - val_accuracy: 0.8542
Epoch 39/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.2393 - accuracy: 0.9155 - val_loss: 0.4748 - val_accuracy: 0.8594
Epoch 40/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.2329 - accuracy: 0.9171 - val_loss: 0.4654 - val_accuracy: 0.8653
Epoch 41/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.2255 - accuracy: 0.9191 - val_loss: 0.5062 - val_accuracy: 0.8573
Epoch 42/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.2266 - accuracy: 0.9201 - val_loss: 0.5217 - val_accuracy: 0.8610
Epoch 43/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.2242 - accuracy: 0.9207 - val_loss: 0.4751 - val_accuracy: 0.8644
Epoch 44/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.2152 - accuracy: 0.9235 - val_loss: 0.5211 - val_accuracy: 0.8579
Epoch 45/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.2166 - accuracy: 0.9231 - val_loss: 0.4649 - val_accuracy: 0.8697
Epoch 46/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.2114 - accuracy: 0.9246 - val_loss: 0.5254 - val_accuracy: 0.8570
Epoch 47/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.2087 - accuracy: 0.9247 - val_loss: 0.4847 - val_accuracy: 0.8628
Epoch 48/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.2086 - accuracy: 0.9267 - val_loss: 0.5284 - val_accuracy: 0.8564
Epoch 49/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.2013 - accuracy: 0.9284 - val_loss: 0.5377 - val_accuracy: 0.8554
Epoch 50/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.2010 - accuracy: 0.9288 - val_loss: 0.4966 - val_accuracy: 0.8664
Epoch 51/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1942 - accuracy: 0.9299 - val_loss: 0.5608 - val_accuracy: 0.8558
Epoch 52/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.1926 - accuracy: 0.9318 - val_loss: 0.5056 - val_accuracy: 0.8673
Epoch 53/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1949 - accuracy: 0.9311 - val_loss: 0.4883 - val_accuracy: 0.8635
Epoch 54/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.1888 - accuracy: 0.9332 - val_loss: 0.5422 - val_accuracy: 0.8574
Epoch 55/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.1885 - accuracy: 0.9330 - val_loss: 0.5360 - val_accuracy: 0.8629
Epoch 56/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.1882 - accuracy: 0.9325 - val_loss: 0.5075 - val_accuracy: 0.8679
Epoch 57/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.1834 - accuracy: 0.9348 - val_loss: 0.5165 - val_accuracy: 0.8645
Epoch 58/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.1809 - accuracy: 0.9354 - val_loss: 0.5259 - val_accuracy: 0.8639
Epoch 59/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.1738 - accuracy: 0.9384 - val_loss: 0.5745 - val_accuracy: 0.8530
Epoch 60/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.1698 - accuracy: 0.9405 - val_loss: 0.5370 - val_accuracy: 0.8645
Epoch 61/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.1744 - accuracy: 0.9381 - val_loss: 0.5868 - val_accuracy: 0.8539
Epoch 62/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.1748 - accuracy: 0.9380 - val_loss: 0.5534 - val_accuracy: 0.8665
Epoch 63/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.1669 - accuracy: 0.9408 - val_loss: 0.5320 - val_accuracy: 0.8667
Epoch 64/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.1634 - accuracy: 0.9422 - val_loss: 0.5764 - val_accuracy: 0.8621
Epoch 65/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.1639 - accuracy: 0.9415 - val_loss: 0.5295 - val_accuracy: 0.8664
Epoch 66/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.1604 - accuracy: 0.9435 - val_loss: 0.5840 - val_accuracy: 0.8638
Epoch 67/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1633 - accuracy: 0.9422 - val_loss: 0.5513 - val_accuracy: 0.8629
Epoch 68/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.1592 - accuracy: 0.9442 - val_loss: 0.5619 - val_accuracy: 0.8599
Epoch 69/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1652 - accuracy: 0.9417 - val_loss: 0.5597 - val_accuracy: 0.8650
Epoch 70/100
50000/50000 [==============================] - 10s 205us/sample - loss: 0.1598 - accuracy: 0.9435 - val_loss: 0.5407 - val_accuracy: 0.8673
Epoch 71/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1539 - accuracy: 0.9450 - val_loss: 0.5509 - val_accuracy: 0.8651
Epoch 72/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.1525 - accuracy: 0.9454 - val_loss: 0.5781 - val_accuracy: 0.8675
Epoch 73/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.1584 - accuracy: 0.9447 - val_loss: 0.5658 - val_accuracy: 0.8579
Epoch 74/100
50000/50000 [==============================] - 10s 206us/sample - loss: 0.1502 - accuracy: 0.9473 - val_loss: 0.6248 - val_accuracy: 0.8586
Epoch 75/100
50000/50000 [==============================] - 10s 209us/sample - loss: 0.1522 - accuracy: 0.9452 - val_loss: 0.5733 - val_accuracy: 0.8632
Epoch 76/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1500 - accuracy: 0.9459 - val_loss: 0.5960 - val_accuracy: 0.8628
Epoch 77/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1447 - accuracy: 0.9492 - val_loss: 0.5895 - val_accuracy: 0.8644
Epoch 78/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1437 - accuracy: 0.9499 - val_loss: 0.5247 - val_accuracy: 0.8641
Epoch 79/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1454 - accuracy: 0.9480 - val_loss: 0.5667 - val_accuracy: 0.8658
Epoch 80/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1432 - accuracy: 0.9497 - val_loss: 0.5354 - val_accuracy: 0.8723
Epoch 81/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1437 - accuracy: 0.9505 - val_loss: 0.5759 - val_accuracy: 0.8647
Epoch 82/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1418 - accuracy: 0.9500 - val_loss: 0.5877 - val_accuracy: 0.8703
Epoch 83/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.1387 - accuracy: 0.9505 - val_loss: 0.5735 - val_accuracy: 0.8690
Epoch 84/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.1407 - accuracy: 0.9510 - val_loss: 0.5740 - val_accuracy: 0.8737
Epoch 85/100
50000/50000 [==============================] - 10s 209us/sample - loss: 0.1334 - accuracy: 0.9532 - val_loss: 0.5730 - val_accuracy: 0.8710
Epoch 86/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1342 - accuracy: 0.9532 - val_loss: 0.5903 - val_accuracy: 0.8663
Epoch 87/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1328 - accuracy: 0.9540 - val_loss: 0.5985 - val_accuracy: 0.8659
Epoch 88/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1376 - accuracy: 0.9514 - val_loss: 0.5755 - val_accuracy: 0.8648
Epoch 89/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.1319 - accuracy: 0.9536 - val_loss: 0.5957 - val_accuracy: 0.8661
Epoch 90/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.1333 - accuracy: 0.9530 - val_loss: 0.5988 - val_accuracy: 0.8633
Epoch 91/100
50000/50000 [==============================] - 10s 209us/sample - loss: 0.1282 - accuracy: 0.9554 - val_loss: 0.6002 - val_accuracy: 0.8674
Epoch 92/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.1333 - accuracy: 0.9531 - val_loss: 0.5531 - val_accuracy: 0.8706
Epoch 93/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.1310 - accuracy: 0.9544 - val_loss: 0.5540 - val_accuracy: 0.8720
Epoch 94/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.1241 - accuracy: 0.9567 - val_loss: 0.6359 - val_accuracy: 0.8632
Epoch 95/100
50000/50000 [==============================] - 10s 207us/sample - loss: 0.1281 - accuracy: 0.9551 - val_loss: 0.6134 - val_accuracy: 0.8645
Epoch 96/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.1275 - accuracy: 0.9557 - val_loss: 0.5925 - val_accuracy: 0.8708
Epoch 97/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.1226 - accuracy: 0.9574 - val_loss: 0.6412 - val_accuracy: 0.8678
Epoch 98/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.1239 - accuracy: 0.9570 - val_loss: 0.6448 - val_accuracy: 0.8597
Epoch 99/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.1227 - accuracy: 0.9574 - val_loss: 0.5598 - val_accuracy: 0.8750
Epoch 100/100
50000/50000 [==============================] - 10s 208us/sample - loss: 0.1250 - accuracy: 0.9565 - val_loss: 0.6793 - val_accuracy: 0.8644

Uh oh. It's apparent that training this deep convolutional neural network is going to take a long time, which is not surprising considering the network has almost 1 million parameters. Updating this many parameters takes a considerable amount of time; unless, of course, you are using a Graphics Processing Unit (GPU). This is a good time for a quick lesson on the differences between CPUs and GPUs.

The central processing unit (CPU) is often called the brains of the PC because it handles the majority of necessary computations. All computers have a CPU, and this is what Keras and TensorFlow utilize by default.

The graphics processing unit (GPU) is in charge of image rendering. The most advanced GPUs were originally designed for gamers; however, GPU-accelerated computing, the use of a GPU together with a CPU to accelerate deep learning, analytics, and engineering applications, has become increasingly common. In fact, the training of deep neural networks is not realistic without them.

The most common GPUs for deep learning are produced by NVIDIA. Furthermore, the NVIDIA Deep Learning SDK provides high-performance tools and libraries to power GPU-accelerated machine learning applications. An alternative would be an AMD GPU in combination with the OpenCL libraries; however, these libraries have fewer active users and less support than the NVIDIA libraries.

If your computer has an NVIDIA GPU, installing the CUDA Drivers and CUDA Toolkit from NVIDIA will allow tensorflow to utilize GPU-accelerated computing. The original paper mentions that it took approximately 10 hours to train the All-CNN network for 350 epochs using a modern GPU, which is considerably faster (several orders of magnitude) than it would take to train on CPU.

# Overlay the training and validation loss curves, one point per epoch.
for key, curve_label in (('loss', 'train loss'), ('val_loss', 'validation loss')):
    plt.plot(history.history[key], label=curve_label);
plt.title('train_loss vs. validation loss on CIFAR-10')
plt.legend();

Model Inference

# Rebuild the network and restore the best checkpointed weights.
model_l = allcnn(weights='best_weights.hdf5')
model_l.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

print(model_l.summary())

# Score the restored model on the held-out test set.
scores = model_l.evaluate(X_test, y_test, verbose=1)
print('Accuracy: {}'.format(scores[1]))
Model: "sequential_9"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_46 (Conv2D)           (None, 32, 32, 96)        2688      
_________________________________________________________________
conv2d_47 (Conv2D)           (None, 32, 32, 96)        83040     
_________________________________________________________________
max_pooling2d_12 (MaxPooling (None, 16, 16, 96)        0         
_________________________________________________________________
dropout_12 (Dropout)         (None, 16, 16, 96)        0         
_________________________________________________________________
conv2d_48 (Conv2D)           (None, 16, 16, 192)       166080    
_________________________________________________________________
conv2d_49 (Conv2D)           (None, 16, 16, 192)       331968    
_________________________________________________________________
max_pooling2d_13 (MaxPooling (None, 8, 8, 192)         0         
_________________________________________________________________
dropout_13 (Dropout)         (None, 8, 8, 192)         0         
_________________________________________________________________
conv2d_50 (Conv2D)           (None, 8, 8, 192)         331968    
_________________________________________________________________
conv2d_51 (Conv2D)           (None, 8, 8, 192)         37056     
_________________________________________________________________
conv2d_52 (Conv2D)           (None, 8, 8, 10)          1930      
_________________________________________________________________
global_average_pooling2d_5 ( (None, 10)                0         
_________________________________________________________________
dense_5 (Dense)              (None, 10)                110       
=================================================================
Total params: 954,840
Trainable params: 954,840
Non-trainable params: 0
_________________________________________________________________
None
10000/10000 [==============================] - 1s 96us/sample - loss: 0.4512 - accuracy: 0.8530
Accuracy: 0.8529999852180481
# Integer class ids 0-9, paired with their human-readable names.
classes = range(0, 10)
class_labels = {class_id: name for class_id, name in zip(classes, labels)}
print(class_labels)
{0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck'}
# Take a small batch of nine test images and their true class ids.
batch = X_test[100:109]
ground_truth = np.argmax(y_test[100:109], axis=-1)

# Predict with the best-checkpoint model (model_l) rather than the
# last-epoch `model`: the evaluation above scored the restored weights,
# so the demonstrated predictions should come from the same model.
predictions = model_l.predict(batch, verbose=1)
9/9 [==============================] - 0s 3ms/sample
# Convert per-class probabilities to the most likely class id per image.
class_result = np.argmax(predictions, axis=-1)
class_result
array([4, 5, 6, 3, 1, 1, 3, 6, 8], dtype=int64)
# Plot the nine test images with predicted vs. actual class names.
fig, axs = plt.subplots(3, 3, figsize=(15, 6))
fig.subplots_adjust(hspace=1)
axs = axs.flatten()

for i, img in enumerate(batch):
    # Look up both names directly by id instead of scanning the whole
    # class_labels dict for a matching key on every image.
    title = 'prediction: {} \n Actual: {}'.format(class_labels[class_result[i]], class_labels[ground_truth[i]])
    axs[i].set_title(title)
    axs[i].axes.get_xaxis().set_visible(False)
    axs[i].axes.get_yaxis().set_visible(False)

    # Plot the image
    axs[i].imshow(img)