import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

plt.rcParams['figure.figsize'] = (8, 8)

Tensors, layers, and autoencoders

  • Tensors
    • The main data structure used in deep learning.
    • Inputs, outputs, and the transformations between layers in a neural network are all represented as tensors (see the sketch after this list).
  • Autoencoders
    • Use cases
      • Dimensionality reduction:
        • Learn a smaller-dimensional representation of the inputs
      • De-noising data:
        • If trained on clean data, irrelevant noise is filtered out during reconstruction
      • Anomaly detection:
        • A poor reconstruction results when the model is fed unseen inputs
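A minimal sketch of that idea (the values are made up): a Keras layer is simply a function from an input tensor to an output tensor.

t = tf.constant([[1.0, 2.0, 3.0, 4.0]])    # a rank-2 tensor of shape (1, 4)
layer = tf.keras.layers.Dense(2, activation='relu')
out = layer(t)                             # layers map input tensors to output tensors
print(out.shape)                           # (1, 2)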

It's a flow of tensors

If you have already built a model, you can use model.layers together with tf.keras.backend to build functions that, given a valid input tensor, return the corresponding output tensor.

This is a useful tool when we want to obtain the output of a network at an intermediate layer.

For instance, if you get the input and output from the first layer of a network, you can build an inp_to_out function that returns the result of carrying out forward propagation through only the first layer for a given input tensor.

So that's what you're going to do right now!

banknote = pd.read_csv('./dataset/banknotes.csv')
banknote.head()
   variace  skewness  curtosis  entropy  class
0  3.62160    8.6661   -2.8073 -0.44699      0
1  4.54590    8.1674   -2.4586 -1.46210      0
2  3.86600   -2.6383    1.9242  0.10645      0
3  3.45660    9.5228   -4.0112 -3.59440      0
4  0.32924   -4.4552    4.5718 -0.98880      0
from sklearn.model_selection import train_test_split

X = banknote.drop(['class'], axis=1)
# Normalize data
X = ((X - X.mean()) / X.std()).to_numpy()
y = banknote['class']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

model = Sequential()
model.add(Dense(2, input_shape=(4, ), activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_2 (Dense)              (None, 2)                 10        
_________________________________________________________________
dense_3 (Dense)              (None, 1)                 3         
=================================================================
Total params: 13
Trainable params: 13
Non-trainable params: 0
_________________________________________________________________
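As a sanity check on the Param # column: the first Dense layer has 4 inputs x 2 neurons + 2 biases = 10 parameters, and the output layer has 2 x 1 + 1 = 3, giving the 13 total parameters reported above.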
import tensorflow.keras.backend as K

# Input tensor from the 1st layer of the model
inp = model.layers[0].input

# Output tensor from the 1st layer of the model
out = model.layers[0].output

# Define a function from inputs to outputs
inp_to_out = K.function([inp], [out])

# Print the results of passing X_test through the 1st layer
print(inp_to_out([X_test]))
[array([[2.41337225e-01, 0.00000000e+00],
       [0.00000000e+00, 1.20011747e+00],
       [0.00000000e+00, 1.63594782e-01],
       [0.00000000e+00, 1.59020412e+00],
       [1.87821317e+00, 0.00000000e+00],
       ...,
       [0.00000000e+00, 2.65686154e-01],
       [2.70112067e-01, 0.00000000e+00]], dtype=float32)]

(output truncated: one 2-value ReLU activation per test sample, 412 rows in total)
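If K.function gives you trouble under TensorFlow 2.x eager execution, an equivalent (and arguably more idiomatic) way to get the same intermediate output is to wrap the layer in a small tf.keras.Model, the same pattern used again in the CNN section below. A quick sketch:

# Sketch: the same first-layer output via a sub-model instead of K.function
first_dense_model = tf.keras.models.Model(inputs=model.layers[0].input,
                                          outputs=model.layers[0].output)
hidden = first_dense_model.predict(X_test)  # same values as inp_to_out([X_test])[0]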

Neural separation

Put on your gloves because you're going to perform brain surgery!

Neurons learn by updating their weights to output values that help them better distinguish between the different output classes in your dataset. You will make use of the inp_to_out() function you just built to visualize the output of two neurons in the first layer of the Banknote Authentication model as it learns.

def plot():
    # One subplot per stored snapshot (the loop below stores 6 of them)
    fig, axes = plt.subplots(1, len(layer_outputs), figsize=(16, 8))
    for i, a in enumerate(axes):
        a.scatter(layer_outputs[i][:, 0], layer_outputs[i][:, 1], c=y_test, edgecolors='none')
        a.set_title('Test Accuracy: {:3.1f} %'.format(float(test_accuracies[i]) * 100.))
    plt.tight_layout()
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
layer_outputs = []
test_accuracies = []

for i in range(0, 21):
    # Train model for 1 epoch
    h = model.fit(X_train, y_train, batch_size=16, epochs=1, verbose=0)
    if i % 4 == 0:
        # Get the output of the first layer
        layer_outputs.append(inp_to_out([X_test])[0])
        
        # Evaluate model accuracy for this epoch
        test_accuracies.append(model.evaluate(X_test, y_test)[1])
412/412 [==============================] - 0s 131us/sample - loss: 0.6889 - accuracy: 0.6748
412/412 [==============================] - 0s 39us/sample - loss: 0.5045 - accuracy: 0.8932
412/412 [==============================] - 0s 39us/sample - loss: 0.4065 - accuracy: 0.9005
412/412 [==============================] - 0s 39us/sample - loss: 0.3392 - accuracy: 0.9442
412/412 [==============================] - 0s 39us/sample - loss: 0.2877 - accuracy: 0.9709
412/412 [==============================] - 0s 36us/sample - loss: 0.2451 - accuracy: 0.9757
plot()

That took a while! If you take a look at the graphs you can see how the neurons are learning to spread out the inputs based on whether they are fake or legit dollar bills (fake bills show up as purple dots). At the start the outputs for the two classes overlap; as the epochs go by, the weights are adjusted so that fake and legit bills are mapped to outputs that lie further and further apart.

Building an autoencoder

Autoencoders have several interesting applications, like anomaly detection or image de-noising. They aim at producing an output identical to their input. The input is compressed into a lower-dimensional space (encoded), and the model then learns to decode it back to its original form.

You will encode and decode the MNIST dataset of handwritten digits. The hidden layer will encode a 32-dimensional representation of each image, which originally consists of 784 pixels (28 x 28). The autoencoder will essentially learn to compress the 784-pixel original image into just 32 numbers (a roughly 24.5x compression) and to use that encoded representation to reconstruct the original 784 pixels.

from tensorflow.keras.datasets import mnist

(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.
X_train = X_train.reshape((len(X_train), np.prod(X_train.shape[1:])))
X_test = X_test.reshape((len(X_test), np.prod(X_test.shape[1:])))
X_test_noise = np.load('./dataset/X_test_MNIST_noise.npy')
X_test_noise = X_test_noise.reshape((len(X_test_noise), np.prod(X_test.shape[1:])))
y_test_noise = np.load('./dataset/y_test_MNIST.npy')

Note: When I used 'adagrad' as the optimizer, it didn't produce correct reconstructions. After changing to 'adam', it works.
autoencoder = Sequential(name='autoencoder')

# Add a dense layer with input the original image pixels and neurons the encoded representation
autoencoder.add(Dense(32, input_shape=(784, ), activation='relu'))

# Add an output layer with as many neurons as the original image pixels
autoencoder.add(Dense(784, activation='sigmoid'))

# Compile your model ('adam' is used instead of 'adadelta'; see the note above)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Summarize your model structure
autoencoder.summary()
Model: "autoencoder"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_4 (Dense)              (None, 32)                25120     
_________________________________________________________________
dense_5 (Dense)              (None, 784)               25872     
=================================================================
Total params: 50,992
Trainable params: 50,992
Non-trainable params: 0
_________________________________________________________________
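Note that, because the pixel values were scaled to [0, 1], each of the 784 sigmoid outputs can be read as a per-pixel probability, which is why binary_crossentropy is a reasonable reconstruction loss here.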

De-noising like an autoencoder

Okay, you have just built an autoencoder model. Let's see how it handles a more challenging task.

First, you will build a model that encodes images, and you will check how different digits are represented with show_encodings(). To build the encoder you will make use of your autoencoder, which has already been trained. You will use just the first half of the network, from the input up to the bottleneck. That way, you will obtain an output of 32 numbers representing the encoded version of the input image.

Then, you will apply your autoencoder to noisy images from MNIST; it should be able to clean up the noisy artifacts.

The digits in this noisy dataset look like this: [image: sample noisy MNIST digits]

def show_encodings(encoded_imgs, number=1):
    n = 5  # how many digits we will display
    original = X_test_noise
    original = original[np.where(y_test_noise == number)]
    encoded_imgs = encoded_imgs[np.where(y_test_noise == number)]
    plt.figure(figsize=(20, 4))
    #plt.title('Original '+str(number)+' vs Encoded representation')
    for i in range(min(n,len(original))):
        # display original imgs
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(original[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        # display encoded imgs
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(np.tile(encoded_imgs[i],(32,1)))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

def compare_plot(original, decoded_imgs):
    n = 4  # How many digits we will display
    plt.figure(figsize=(20, 4))
    for i in range(n):
        # Display original
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(original[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        # Display reconstruction
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(decoded_imgs[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.suptitle('Noisy vs Decoded images')  # figure-level title (plt.title would only label the last subplot)
autoencoder.fit(X_train, X_train,
                epochs=100,
                batch_size=256,
                shuffle=True,
                validation_data=(X_test, X_test), verbose=0);
encoder = Sequential()
encoder.add(autoencoder.layers[0])

# Encode the noisy images and show the encodings for your favorite number [0-9]
encodings = encoder.predict(X_test_noise)
show_encodings(encodings, number=1)
# Build a standalone decoder from the autoencoder's last layer
decoder_layer = autoencoder.layers[-1]
decoder_input = tf.keras.Input(shape=(32,))
decoder = tf.keras.models.Model(decoder_input, decoder_layer(decoder_input))
# (decoder.predict(encodings) would now map 32-number codes back to 784-pixel images)

# Reconstruct the (clean) test images with your autoencoder
decoded_imgs = autoencoder.predict(X_test)

# Plot original vs decoded images
compare_plot(X_test, decoded_imgs)
decoded_imgs = autoencoder.predict(X_test_noise)

# Plot noisy vs decoded images
compare_plot(X_test_noise, decoded_imgs)

Intro to CNNs

[image: convolutional neural network architecture diagram]

Building a CNN model

Building a CNN model in Keras isn't much more difficult than building any of the models you've already built throughout the course! You just need to make use of convolutional layers.

You're going to build a shallow convolutional model that classifies the MNIST digits dataset. The same one you de-noised with your autoencoder! The images are 28 x 28 pixels and have a single channel, since they are grayscale pictures.

Go ahead and build this small convolutional model!

from tensorflow.keras.layers import Conv2D, Flatten

# Instantiate model
model = Sequential()

# Add a convolutional layer of 32 filters of size 3x3
model.add(Conv2D(32, kernel_size=3, input_shape=(28, 28, 1), activation='relu'))

# Add a convolutional layer of 16 filters of size 3x3
model.add(Conv2D(16, kernel_size=3, activation='relu'))

# Flatten the previous layer's output
model.add(Flatten())

# Add as many outputs as classes with softmax activation
model.add(Dense(10, activation='softmax'))

Looking at convolutions

Inspecting the activations of a convolutional layer is a cool thing. You have to do it at least once in your lifetime!

To do so, you will build a new model with the Keras Model object, which takes in a list of inputs and a list of outputs. The output you will give this new model is the output of the first convolutional layer when it is fed an MNIST digit as its input image.

Let's look at the convolutional masks that were learned in the first convolutional layer of this model!

model.summary()
Model: "sequential_3"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 26, 26, 32)        320       
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 24, 24, 16)        4624      
_________________________________________________________________
flatten (Flatten)            (None, 9216)              0         
_________________________________________________________________
dense_6 (Dense)              (None, 10)                92170     
=================================================================
Total params: 97,114
Trainable params: 97,114
Non-trainable params: 0
_________________________________________________________________
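These shapes and counts follow directly from the layer definitions: each 3x3 convolution (no padding) shrinks the spatial size by 2, so 28 -> 26 -> 24. The first Conv2D has 3*3*1*32 + 32 = 320 parameters, the second 3*3*32*16 + 16 = 4,624; flattening 24*24*16 gives 9,216 features, and the final Dense layer adds 9,216*10 + 10 = 92,170.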
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Add the channel dimension expected by Conv2D: (samples, 28, 28, 1)
X_train = np.reshape(X_train, [-1, 28, 28, 1])
X_test = np.reshape(X_test, [-1, 28, 28, 1])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=30, batch_size=32);
Train on 60000 samples
Epoch 1/30
60000/60000 [==============================] - 4s 64us/sample - loss: 0.2169 - accuracy: 0.9472
Epoch 2/30
60000/60000 [==============================] - 3s 48us/sample - loss: 0.0722 - accuracy: 0.9785
Epoch 3/30
60000/60000 [==============================] - 3s 49us/sample - loss: 0.0464 - accuracy: 0.9856
...
Epoch 30/30
60000/60000 [==============================] - 3s 50us/sample - loss: 0.0185 - accuracy: 0.9980
(training log truncated)
first_layer_output = model.layers[0].output

# Build a model using the model's input and the first layer output
first_layer_model = tf.keras.models.Model(inputs=model.layers[0].input, outputs=first_layer_output)

# Use this model to predict on X_test
activations = first_layer_model.predict(X_test)

fig, axs = plt.subplots(1, 3, figsize=(16, 8))

# Plot the activations of first digit of X_test for the 15th filter
axs[1].matshow(activations[0,:,:,14], cmap = 'viridis');

# Do the same but for the 18th filter now
axs[2].matshow(activations[0,:,:,17], cmap = 'viridis');

axs[0].matshow(X_test[0,:,:,0], cmap='viridis');

Each filter of the first layer learned a different convolution. The 15th filter (a.k.a. convolutional mask) learned to detect horizontal traces in your digits, while the 18th filter seems to be checking for vertical traces.

Preparing your input image

The original ResNet50 model was trained with images of size 224 x 224 pixels and a number of preprocessing operations, like subtracting the mean pixel value of the training set from all training images. You need to pre-process the images you want to predict on in the same way.

When predicting on a single image, you need it to fit the model's input shape, which in this case looks like this: (batch_size, width, height, channels). np.expand_dims with axis=0 adds the batch-size dimension, indicating that a single image will be passed to predict; this dimension has value 1, since we are only predicting on one image.

You will go over these preprocessing steps as you prepare this dog's (named Ivy) image into one that can be classified by ResNet50. [image: Ivy the dog]

from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input

# Load the image with the right target size for your model
img = image.load_img('./dataset/dog.png', target_size=(224, 224))

# Turn it into an array
img_array = image.img_to_array(img)

# Expand the dimensions of the image, this is so that it fits the expected model input format
img_expanded = np.expand_dims(img_array, axis=0)

# Pre-process the img in the same way original images were
img_ready = preprocess_input(img_expanded)
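For reference, the resnet50 preprocess_input converts images from RGB to BGR and zero-centers each color channel with respect to the ImageNet training-set means (it does not rescale to [0, 1]), matching how the original network was trained.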

Using a real world model

Okay, so Ivy's picture is ready to be used by ResNet50. It is stored in img_ready and now looks like this: [image: preprocessed dog image]. ResNet50 is a model trained on the ImageNet dataset that is able to distinguish between 1,000 different labeled objects. It is a deep model with 50 layers, and interactive 3D visualizations of its architecture are available online. It's time to use this trained model to find out Ivy's breed!

from tensorflow.keras.applications.resnet50 import ResNet50, decode_predictions

# Instantiate a ResNet50 model with 'imagenet' weights
model = ResNet50(weights='imagenet')

# Predict with ResNet50 on your already processed img
preds = model.predict(img_ready)

# Decode the first 3 predictions
print('Predicted:', decode_predictions(preds, top=3)[0])
Predicted: [('n02088364', 'beagle', 0.9073764), ('n02089867', 'Walker_hound', 0.06626676), ('n02089973', 'English_foxhound', 0.018850898)]
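Each tuple returned by decode_predictions is (WordNet ID, human-readable class name, probability), so the model is about 91% confident that Ivy is a beagle.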

Custom dog image

img = image.load_img('./dataset/grace.jpg', target_size=(224, 224))
img_array = image.img_to_array(img)
img_expanded = np.expand_dims(img_array, axis=0)
img_ready = preprocess_input(img_expanded)
plt.imshow(img);
preds = model.predict(img_ready)

# Decode the first 3 predictions
print('Predicted:', decode_predictions(preds, top=3)[0])
Predicted: [('n02085936', 'Maltese_dog', 0.17497538), ('n02098286', 'West_Highland_white_terrier', 0.12481658), ('n02086240', 'Shih-Tzu', 0.09362568)]

Intro to LSTMs

  • Recurrent Neural Network (RNN)
  • Long Short Term Memory (LSTM)
  • When to use LSTM?
    • Image captioning
    • Speech to text
    • Text translation
    • Document summarization
    • Text generation
    • Musical composition

Text prediction with LSTMs

During the following exercises you will build a toy LSTM model that is able to predict the next word using a small text dataset. This dataset consists of cleaned quotes from The Lord of the Rings movies.

You will turn this text into sequences of length 4 and make use of the Keras Tokenizer to prepare the features and labels for your model!

The Keras Tokenizer is already imported for you to use. It assigns a unique number to each unique word and stores the mappings in a dictionary. This is important since the model deals with numbers, but we will later want to decode the output numbers back into words.

text = '''
it is not the strength of the body but the strength of the spirit it is useless to meet revenge 
with revenge it will heal nothing even the smallest person can change the course of history all we have 
to decide is what to do with the time that is given us the burned hand teaches best after that advice about 
fire goes to the heart END
'''
from tensorflow.keras.preprocessing.text import Tokenizer

# Split text into an array of words
words = text.split()

# Make sentences of 4 words each, moving one word at a time
sentences = []
for i in range(4, len(words) + 1):
    sentences.append(' '.join(words[i - 4: i]))
    
# Instantiate a Tokenizer, then fit it on the sentences
tokenizer = Tokenizer()
tokenizer.fit_on_texts(sentences)

# Turn sentences into a sequence of numbers
sequences = tokenizer.texts_to_sequences(sentences)
print("Sentences: \n {} \n Sequences: \n {}".format(sentences[:5], sequences[:5]))
Sentences: 
 ['it is not the', 'is not the strength', 'not the strength of', 'the strength of the', 'strength of the body'] 
 Sequences: 
 [[5, 3, 43, 1], [3, 43, 1, 6], [43, 1, 6, 4], [1, 6, 4, 1], [6, 4, 1, 10]]

Build your LSTM model

You've already prepared your sequences of text. It's time to build your LSTM model!

Remember that your sequences have 4 words each; your model will be trained on the first three words of each sequence to predict the 4th one. You are going to use an Embedding layer that will essentially learn to turn words into vectors. These vectors will then be passed to a simple LSTM layer. The output is a Dense layer with as many neurons as there are words in the vocabulary and a softmax activation, because we want to obtain the most probable next word out of all possible words.

vocab_size = len(tokenizer.word_counts) + 1
vocab_size
46
from tensorflow.keras.layers import Embedding, LSTM

model = Sequential()

# Add an Embedding layer with the right parameters
model.add(Embedding(input_dim=vocab_size, input_length=3, output_dim=8))

# Add a 32 unit LSTM layer
model.add(LSTM(32))

# Add a hidden Dense layer of 32 units
model.add(Dense(32, activation='relu'))
model.add(Dense(vocab_size, activation='softmax'))
model.summary()
Model: "sequential_4"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding (Embedding)        (None, 3, 8)              368       
_________________________________________________________________
lstm (LSTM)                  (None, 32)                5248      
_________________________________________________________________
dense_7 (Dense)              (None, 32)                1056      
_________________________________________________________________
dense_8 (Dense)              (None, 46)                1518      
=================================================================
Total params: 8,190
Trainable params: 8,190
Non-trainable params: 0
_________________________________________________________________
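The parameter counts check out: the Embedding layer stores 46 x 8 = 368 weights; the LSTM has 4 gates, each with (8 + 32) x 32 weights plus 32 biases, i.e. 4 x 1,312 = 5,248; and the Dense layers contribute 32 x 32 + 32 = 1,056 and 32 x 46 + 46 = 1,518, for 8,190 in total.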
from tensorflow.keras.utils import to_categorical

np_sequences = np.array(sequences)
print(np_sequences.shape)

X = np_sequences[:, :3]
y = np_sequences[:, 3]
y = to_categorical(y, num_classes=vocab_size)
(64, 4)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(X, y, epochs=500, verbose=0);
def predict_text(test_text, model=model):
    if len(test_text.split()) != 3:
        print('Text input should be 3 words')
        return False
    # Turn the test_text into a sequence of numbers
    test_seq = tokenizer.texts_to_sequences([test_text])
    test_seq = np.array(test_seq)
    
    # Use the model passed as a parameter to predict the next word
    pred = model.predict(test_seq).argmax(axis=1)[0]
    
    # Return the word that maps to the prediction
    return tokenizer.index_word[pred]
predict_text('meet revenge with')
'revenge'
predict_text('the course of')
'history'
predict_text('strength of the')
'spirit'
def plot_graphs(history, string):
    plt.plot(history.history[string])
    plt.xlabel('Epochs')
    plt.ylabel(string)

plot_graphs(history, 'accuracy')
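Note that predict_text decodes greedily: it always picks the argmax of the softmax output. Sampling from the predicted distribution would give more varied text, but on this tiny dataset greedy decoding is enough to reproduce the training quote, as the loop below shows.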
text = 'meet revenge with'
story = [text]
for i in range(100):
    result = predict_text(text)
    if result == "end":   # the Tokenizer lowercases, so END becomes 'end'
        break
    story.append(result)
    text += ' ' + result
    text = ' '.join(text.split()[1:])   # slide the 3-word window forward

story = " ".join(str(x) for x in story)
print('Final story : {}'.format(story))
Final story : meet revenge with revenge it will heal nothing even the smallest person can change the course of history all we have to decide is what to do with the time that is given us the burned hand teaches best after that advice about fire goes to the heart