Copyright CRC Press 2023 - present CRC Press
Author: Dr Stephen Lynch, National Teaching Fellow, FIMA, SFHEA
# 16.1: Bifurcation of a Fitzhugh-Nagumo Limit Cycle.
# Hopf (safe) bifurcations.
from matplotlib import pyplot as plt
from matplotlib.animation import ArtistAnimation
import numpy as np
from scipy.integrate import odeint
fig = plt.figure()
myimages = []
theta , epsilon , gamma = 0.14 , 0.01 , 2.54
tmax = 200
def FN_ode(x, t):
    return [i + x[0] * (x[0] - theta) * (1 - x[0]) - x[1],
            epsilon * (x[0] - gamma * x[1])]
time = np.arange(0, tmax, 0.1)
x0 = [0, 0.1]
for i in np.arange(0, 0.2, 0.01):
    xs = odeint(FN_ode, x0, time)
    imgplot = plt.plot(xs[:,0], xs[:, 1], 'r-')
    myimages.append(imgplot)
anim = ArtistAnimation(fig, myimages, interval=500, blit=False, repeat_delay=100)
plt.close()
plt.show()
from IPython.display import HTML
HTML(anim.to_jshtml())
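The animation above shows the limit cycle growing from the critical point as the current i is ramped through the Hopf bifurcation. As a complementary check, the following hedged sketch (not code from the book) reuses FN_ode, tmax and the parameters from the listing above and records the peak-to-peak amplitude of x for each value of i; the step size in i and the decision to discard the first half of each run as a transient are arbitrary choices.
# Hedged sketch: amplitude of the steady-state oscillation in x versus i.
# Assumes FN_ode, tmax, theta, epsilon and gamma from the listing above are in memory.
import numpy as np
from scipy.integrate import odeint
from matplotlib import pyplot as plt
currents = np.arange(0, 0.2, 0.005)
amps = []
time = np.arange(0, tmax, 0.1)
for i in currents:                       # FN_ode reads the global i, as above
    xs = odeint(FN_ode, [0, 0.1], time)
    x_steady = xs[len(time)//2:, 0]      # discard transients (arbitrary cut)
    amps.append(x_steady.max() - x_steady.min())
plt.plot(currents, amps, "b.")
plt.xlabel("i")
plt.ylabel("Peak-to-peak amplitude of x")
plt.show()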
# 16.2: Morris-Lecar Model of a neuron, three limit cycles.
# The black limit cycle is unstable, the red and blue are stable.
# Can you produce an animation of a bifurcation from one limit cycle to another?
# A hedged starting sketch is given after this listing.
from matplotlib import pyplot as plt
import numpy as np
from scipy.integrate import odeint
fig = plt.figure()
i,gfast,V1,V2,V3,V4=82,20,-1.2,18,-20.5,10
ENa,EK,gslow,gleak,Eleak,phi,C=50,-100,20,2,-70,0.15,2
tmax = 100
def ML_ode(x, t):
    return [(i - gfast*0.5*(1+np.tanh((x[0]-V1)/V2))*(x[0]-ENa) - gslow*x[1]*(x[0]-EK) - gleak*(x[0]-Eleak))/C,
            phi*(0.5*(1+np.tanh((x[0]-V3)/V4)) - x[1])*np.cosh((x[0]-V3)/(2*V4))]
time = np.linspace(0, tmax, 1000)
x0=[-40,0.335]
xs1 = odeint(ML_ode, x0, time)
# The red limit cycle is stable.
imgplot = plt.plot(xs1[:,0], xs1[:, 1], 'r-')
x0=[-40,0.175]
# Take negative time so an unstable limit cycle becomes stable.
# The black limit cycle is unstable.
xs2 = odeint(ML_ode, x0, -time)
imgplot = plt.plot(xs2[:,0], xs2[:, 1], 'k-')
x0=[-40,0.03]
xs3 = odeint(ML_ode, x0, time)
# The blue limit cycle is stable.
imgplot = plt.plot(xs3[:,0], xs3[:, 1], 'b-')
plt.xlabel("V", fontsize=15)
plt.ylabel("N", fontsize=15)
plt.tick_params(labelsize=15)
plt.show()
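As a starting point for the animation asked for in the comment above (a hedged sketch, not the book's solution), one can sweep the initial value of N across the unstable black cycle and watch successive trajectories settle onto either the inner blue or the outer red stable limit cycle; the range and number of initial conditions are arbitrary choices.
# Hedged sketch: animate trajectories from a sweep of initial conditions.
# Assumes ML_ode, tmax and the parameters from the listing above are in memory.
from matplotlib import pyplot as plt
from matplotlib.animation import ArtistAnimation
import numpy as np
from scipy.integrate import odeint
fig = plt.figure()
frames = []
time = np.linspace(0, tmax, 1000)
for N0 in np.linspace(0.03, 0.335, 20):
    xs = odeint(ML_ode, [-40, N0], time)
    frames.append(plt.plot(xs[:, 0], xs[:, 1], "g-"))
anim = ArtistAnimation(fig, frames, interval=300, blit=False)
plt.close()
from IPython.display import HTML
HTML(anim.to_jshtml())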
# 16.3: Fitzhugh-Nagumo full adder
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# Input current.
def Input_1(t):
    return 1*(t>500) - 1*(t>1000) + 1*(t>2000) - 1*(t>2500) + 1*(t>3000)
def Input_2(t):
    return 1*(t>1000) - 1*(t>1500) + 1*(t>2500)
def Input_3(t):
    return 1*(t>1500) - 1*(t>3000) + 1*(t>3500)
# Constants
theta , gamma , epsilon , Tmax =0.1 , 0.1 , 0.1 , 2000
m , c =-100 , 60
w1 , w2 , x1 = 0.8 , 0.45 , 1.5 #weights
t=np.arange(0.0, 4000.0, 0.1)
def FN_ODEs(X, t):
    u1, v1, u2, v2, u3, v3, usum, vsum, ucarry, vcarry = X
    du1 = -u1*(u1-theta)*(u1-1) - v1 + Input_1(t)  # I_1
    dv1 = epsilon*(u1 - gamma*v1)
    du2 = -u2*(u2-theta)*(u2-1) - v2 + Input_2(t)  # I_2
    dv2 = epsilon*(u2 - gamma*v2)
    du3 = -u3*(u3-theta)*(u3-1) - v3 + Input_3(t)  # I_3
    dv3 = epsilon*(u3 - gamma*v3)
    dusum = -usum*(usum-theta)*(usum-1) - vsum + w1/(1+np.exp(m*v1+c)) \
            + w1/(1+np.exp(m*v2+c)) + w1/(1+np.exp(m*v3+c)) - x1/(1+np.exp(m*vcarry+c))  # O_1
    dvsum = epsilon*(usum - gamma*vsum)
    ducarry = -ucarry*(ucarry-theta)*(ucarry-1) - vcarry + w2/(1+np.exp(m*v1+c)) \
              + w2/(1+np.exp(m*v2+c)) + w2/(1+np.exp(m*v3+c))  # O_2
    dvcarry = epsilon*(ucarry - gamma*vcarry)
    return du1, dv1, du2, dv2, du3, dv3, dusum, dvsum, ducarry, dvcarry
# Solve the coupled ODEs.
X = odeint(FN_ODEs,[0.01,0.01,0.01,0.01,0.01,0.01,0,0,0,0],t,rtol=1e-6)
plt.subplots_adjust(hspace = 1)
plt.figure(1)
plt.rcParams["font.size"] = "14"
plt.subplot(5,1,1)
plt.title("Fitzhugh-Nagumo Full-Adder")
plt.plot(t, X[: , 0], "b")
plt.ylim(-1, 1.5)
plt.ylabel("I$_1$")
plt.subplot(5,1,2)
plt.plot(t, X[:,2], "b")
plt.ylim(-1, 1.5)
plt.ylabel("I$_2$")
plt.subplot(5,1,3)
plt.plot(t, X[:,4], "b")
plt.ylim(-1, 1.5)
plt.ylabel("I$_3$")
plt.subplot(5,1,4)
plt.plot(t, X[:,6], "g")
plt.ylim(-1, 1.5)
plt.ylabel("O$_1$")
plt.subplot(5,1,5)
plt.plot(t, X[:,8], "g")
plt.ylim(-1, 1.5)
plt.ylabel("O$_2$")
plt.xlabel("Time")
plt.show()
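The three input neurons drive the sum and carry neurons through sigmoidal coupling terms of the form w/(1+exp(m*v+c)). With m = -100 and c = 60 each term is effectively a hard switch that turns on once the recovery variable v exceeds 0.6, and the weight only sets the height of the step (x1 = 1.5 enters with a minus sign, so it acts as inhibition from the carry neuron). The short hedged sketch below simply plots these coupling curves; it is illustrative and not part of the book's listing.
# Hedged sketch: the synaptic coupling term w/(1 + exp(m*v + c)) used above.
import numpy as np
import matplotlib.pyplot as plt
v = np.linspace(0, 1, 500)
for w, lab in [(0.8, "w1 = 0.8"), (0.45, "w2 = 0.45"), (1.5, "x1 = 1.5")]:
    plt.plot(v, w / (1 + np.exp(-100*v + 60)), label=lab)  # m = -100, c = 60
plt.xlabel("v")
plt.ylabel("Coupling strength")
plt.legend()
plt.show()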
# 16.4: The Fitzhugh-Nagumo Ballistic SR Flip-Flop with Noise.
# In practical applications SR flip-flops need to be noise resistant.
# The program takes about a minute to run.
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# Input currents.
def Input_1(t):
    return 1*(t>1000) - 1*(t>1010)
def Input_2(t):
    return 1*(t>2000) - 1*(t>2010)
def Ic1(t):
    return 1*(t>500)
def Ic2(t):
    return 1
# Constants
theta , gamma , epsilon , Tmax = 0.1 , 0.1 , 0.1 , 2000
m , c = -100 , 60
t=np.arange(0.0, 3000.0, 0.1)
length = len(t)
gauss = 2.5*np.random.normal(0,0.05,length*4) # Gaussian noise array
# Function to add Gaussian noise to system.
def noise(t, n):
    interval = round(10*t - 1)*n
    return gauss[interval]
w1 , x1 = 0.5 , -1
def FN_ODEs(X, t):
    u1, v1, u2, v2, u3, v3, u4, v4 = X
    du1 = -u1*(u1-theta)*(u1-1) - v1 + Input_1(t) + noise(t, 1)  # I_1
    dv1 = epsilon*(u1 - gamma*v1)
    du2 = -u2*(u2-theta)*(u2-1) - v2 + Input_2(t) + noise(t, 2)  # I_2
    dv2 = epsilon*(u2 - gamma*v2)
    du3 = -u3*(u3-theta)*(u3-1) - v3 + w1/(1+np.exp(m*v1+c)) + x1/(1+np.exp(m*v4+c)) + Ic1(t) + noise(t, 3)  # O_1
    dv3 = epsilon*(u3 - gamma*v3)
    du4 = -u4*(u4-theta)*(u4-1) - v4 + 0.45/(1+np.exp(m*v2+c)) + x1/(1+np.exp(m*v3+c)) + Ic2(t) + noise(t, 4)  # O_2
    dv4 = epsilon*(u4 - gamma*v4)
    return du1, dv1, du2, dv2, du3, dv3, du4, dv4
X = odeint(FN_ODEs,[0.01,0.01,0.01,0.01,0,0,0,0],t,rtol=1e-6)
u1 = X[:,0];v1 = X[:,1];u2 = X[:,2];v2 = X[:,3];
u3 = X[:,4];v3 = X[:,5];u4 = X[:,6];v4 = X[:,7]
plt.subplots_adjust(hspace = 1)
plt.figure(1)
plt.rcParams["font.size"] = "14"
plt.subplot(4,1,1)
plt.title("Fitzhugh-Nagumo SR Flip-Flop With Noise")
plt.plot(t, u1, "b")
plt.ylim(-1, 1.5)
plt.ylabel("I$_1$")
plt.subplot(4,1,2)
plt.plot(t, u2, "b")
plt.ylim(-1, 1.5)
plt.ylabel("I$_2$")
plt.subplot(4,1,3)
plt.plot(t, u3, "g")
plt.ylim(-1, 1.5)
plt.ylabel("O$_1$")
plt.subplot(4,1,4)
plt.plot(t, u4, "g")
plt.ylim(-1, 1.5)
plt.ylabel("O$_2$")
plt.xlabel("Time")
plt.show()
# 17.1: ANN for an XOR Logic Gate.
import numpy as np
w11 , w12 , w21 , w22 , w13 , w23 = 60 , 80 , 60 , 80 , -60 , 60
b1 , b2 , b3 = -90 , -40 , -30
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
def XOR(x1, x2):
    h1 = x1 * w11 + x2 * w21 + b1
    h2 = x1 * w12 + x2 * w22 + b2
    o1 = sigmoid(h1) * w13 + sigmoid(h2) * w23 + b3
    return sigmoid(o1)
print("XOR(0,0)= " , XOR(0 ,0))
print("XOR(0,1)= " , XOR(0 ,1))
print("XOR(1,0)= " , XOR(1 ,0))
print("XOR(1,1)= " , XOR(1 ,1))
XOR(0,0)=  9.357622968839299e-14
XOR(0,1)=  0.9999999999999065
XOR(1,0)=  0.9999999999999065
XOR(1,1)=  9.357622968891759e-14
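The hand-picked weights make the first hidden neuron act as an AND unit (h1 = 60 x1 + 60 x2 - 90 is positive only for input (1, 1)) and the second as an OR unit (h2 = 80 x1 + 80 x2 - 40 is positive unless the input is (0, 0)); the output neuron then computes "OR and not AND", which is XOR. The short check below is illustrative only and reuses the weights and functions defined above; it prints the hidden pre-activations alongside the network output.
# Illustrative check: print the hidden pre-activations to see the AND/OR structure.
for x1_in, x2_in in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    h1 = x1_in * w11 + x2_in * w21 + b1
    h2 = x1_in * w12 + x2_in * w22 + b2
    print("x =", (x1_in, x2_in), " h1 =", h1, " h2 =", h2, " XOR =", round(XOR(x1_in, x2_in), 3))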
# 17.2: Backpropagation, keep the biases constant in this case.
import numpy as np
w11,w12,w21,w22,w13,w23 = 0.2,0.15,0.25,0.3,0.15,0.1
b1 , b2 , b3 = -1 , -1 , -1
yt , eta = 0 , 0.1
x1 , x2 = 1 , 1
def sigmoid(v):
    return 1 / (1 + np.exp(-v))
h1 = x1 * w11 + x2 * w21 + b1
h2 = x1 * w12 + x2 * w22 + b2
o1 = sigmoid(h1) * w13 + sigmoid(h2) * w23 + b3
y = sigmoid(o1)
print("y = ", y)
# Backpropagate.
dErrdw13=(yt-y)*sigmoid(o1)*(1-sigmoid(o1))*sigmoid(h1)
w13 = w13 - eta * dErrdw13
print("w13_new = ", w13)
dErrdw23=(yt-y)*sigmoid(o1)*(1-sigmoid(o1))*sigmoid(h2)
w23 = w23 - eta * dErrdw23
print("w23_new = ", w23)
dErrdw11=(yt-y)*sigmoid(o1)*(1-sigmoid(o1))*w13*sigmoid(h1)*(1-sigmoid(h1))*x1
w11 = w11 - eta * dErrdw11
print("w11_new = ", w11)
dErrdw12=(yt-y)*sigmoid(o1)*(1-sigmoid(o1))*w23*sigmoid(h2)*(1-sigmoid(h2))*x1
w12 = w12 - eta * dErrdw12
print("w12_new = ", w12)
dErrdw21=(yt-y)*sigmoid(o1)*(1-sigmoid(o1))*w13*sigmoid(h1)*(1-sigmoid(h1))*x2
w21 = w21 - eta * dErrdw21
print("w21_new = ", w21)
dErrdw22=(yt-y)*sigmoid(o1)*(1-sigmoid(o1))*w23*sigmoid(h2)*(1-sigmoid(h2))*x2
w22 = w22 - eta * dErrdw22
print("w22_new = ", w22)
y =  0.28729994077761756
w13_new =  0.1521522763401024
w23_new =  0.10215227634010242
w11_new =  0.20020766275648338
w12_new =  0.15013942100503588
w21_new =  0.2502076627564834
w22_new =  0.3001394210050359
# 17.3: Boston Housing with a Hidden Layer.
# Run in Google Colab.
# Results do not get any better after three hidden neurons.
import numpy as np
import matplotlib.pyplot as plt
# Import keras so that we can access the Boston housing data.
import tensorflow as tf
from tensorflow import keras
## Parameters
num_epochs = 5000
max_num_hidden = 3
eta = 0.05
## Functions
def UniformRandomMatrix(rows, cols):
    res = [[np.random.uniform() for c in range(cols)] for r in range(rows)]
    return np.matrix(res)
## Load the data
dataset = keras.datasets.boston_housing
(train_X, train_y), (_,_) = dataset.load_data (test_split = 0)
train_X = np.matrix (train_X)
train_y = np.matrix (train_y)
(num_samples, num_inputs) = train_X.shape
# Add bias
bias = np.ones (num_samples)
bias = np.matrix (bias).transpose ()
train_X = np.append (train_X, bias, axis=1)
## Normalize data
for i in range(num_inputs):
    col = train_X[:, i]
    train_X[:, i] = (col - col.mean()) / col.std()
miny = train_y.min ()
maxy = train_y.max ()
mean = (maxy + miny)/2
std = (maxy - miny)/2
train_y = (train_y - mean)/std
# Adjust for bias column
num_inputs += 1
# Adjust for sample size
eta /= num_samples
## Test various hidden node counts
for num_hidden in range(1, max_num_hidden + 1):
    ## Initialise weights
    np.random.seed(123456)
    w_hidden = 0.1*UniformRandomMatrix(num_inputs, num_hidden)
    w_output = 0.1*UniformRandomMatrix(num_hidden+1, 1)
    ## Iterate
    mse = []
    for _ in range(num_epochs):
        # Outputs
        phi = np.append(bias, np.tanh(train_X*w_hidden), axis=1)
        y = phi * w_output
        err = y - train_y.transpose()
        # Gradients
        g_output = phi.transpose() * err
        phi_range = np.array(phi[:, range(1, num_hidden+1)])
        w_output_range = w_output[range(1, num_hidden+1), 0].transpose()
        err_term = np.array(err*w_output_range)
        g_hidden = train_X.transpose() * np.matrix((1 - phi_range**2)*err_term)
        # Update weights
        w_output -= eta * g_output
        w_hidden -= eta * g_hidden
        mse.append(err.var())
    ## Plot
    plt.plot(range(num_epochs), mse, label="Hidden Neurons = {0}".format(num_hidden))
plt.legend()
plt.ylabel("MSE")
plt.xlabel("Epochs")
plt.show()
Question 17.4: Plot bifurcation diagrams and compare the results with the stability diagram in Figure 17.7(b). The eigenvalues of the Jacobian matrix determine the stability boundaries; a hedged sketch checking this condition at a fixed point follows this paragraph.
The two-neuron module is modeled using the discrete system:
$$x_{n+1}=b_1+w_{11}\tanh(\alpha x_n)+w_{12}\tanh(\beta y_n), \quad y_{n+1}=b_2+w_{21} \tanh(\alpha x_n),$$ where $x_n, y_n$ are neuron activation potentials, $b_1, b_2$ are biases, and $w_{11}$, $w_{12}$, and $w_{21}$ are synaptic weights.
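For this map, a fixed point $(x^*, y^*)$ is stable when both eigenvalues of the Jacobian matrix
$$J=\begin{pmatrix} w_{11}\alpha\,\mathrm{sech}^2(\alpha x^*) & w_{12}\beta\,\mathrm{sech}^2(\beta y^*) \\ w_{21}\alpha\,\mathrm{sech}^2(\alpha x^*) & 0 \end{pmatrix}$$
lie inside the unit circle. The following hedged sketch (not the book's solution) locates a fixed point numerically for the part (i) parameters with an illustrative value b1 = 0 and tests this condition; the starting guess for fsolve is arbitrary.
# Hedged sketch: eigenvalues of the Jacobian of the two-neuron map at a fixed point.
import numpy as np
from scipy.optimize import fsolve
b1, b2, w11, w21, w12, a, b = 0, -1, 1.5, 5, 3, 1, 0.1   # b1 = 0 is illustrative
def fixed_point_eqs(p):
    x, y = p
    return [b1 + w11*np.tanh(a*x) + w12*np.tanh(b*y) - x,
            b2 + w21*np.tanh(a*x) - y]
xstar, ystar = fsolve(fixed_point_eqs, [-2, -8])          # arbitrary starting guess
J = np.array([[w11*a/np.cosh(a*xstar)**2, w12*b/np.cosh(b*ystar)**2],
              [w21*a/np.cosh(a*xstar)**2, 0]])
eigs = np.linalg.eigvals(J)
print("Fixed point:", xstar, ystar)
print("Eigenvalues:", eigs, "-> stable" if np.all(np.abs(eigs) < 1) else "-> unstable")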
# 17.4: Bifurcation Diagrams for a Two-Neuron Module.
# Hysteresis - system is still in steady state, w12=3.
# See Figure 17.7(b). Here a=alpha and b=beta.
from matplotlib import pyplot as plt
import numpy as np
# Parameters for part (i). Change parameters to get bifurcation
# diagrams for parts (ii), (iii), and (iv).
b2, w11, w21, w12, a , b = -1 , 1.5 , 5 , 3 , 1 , 0.1
start, max = -5, 10
half_N = 1999
N = 2 * half_N + 1
N1 = 1 + half_N
xs_up, xs_down = [], []
x0, y0 = -2, -8
ns_up = np.arange(half_N)
ns_down = np.arange(N1, N)
# Ramp b1 up
for n in ns_up:
    b1 = start + n*max / half_N
    x = b1 + w11 * np.tanh(a * x0) + w12 * np.tanh(b * y0)
    y = b2 + w21 * np.tanh(a * x0)
    xn = x
    x0, y0 = x, y
    xs_up.append([n, xn])
xs_up = np.array(xs_up)
# Ramp b1 down
for n in ns_down:
    b1 = start + 2*max - n*max / half_N
    x = b1 + w11 * np.tanh(a * x0) + w12 * np.tanh(b * y0)
    y = b2 + w21 * np.tanh(a * x0)
    xn = x
    x0, y0 = x, y
    xs_down.append([N-n, xn])
xs_down = np.array(xs_down)
fig, ax = plt.subplots()
xtick_labels = np.linspace(start, max, 7)
ax.set_xticks([(-start + x) / max * N1 for x in xtick_labels])
ax.set_xticklabels(["{:.1f}".format(xtick) for xtick in xtick_labels])
plt.plot(xs_up[:, 0], xs_up[:, 1], "r.", markersize=0.1)
plt.plot(xs_down[:, 0], xs_down[:,1], "b.", markersize=0.1)
plt.xlabel(r"$b_1$", fontsize=15)
plt.ylabel(r"$x_n$", fontsize=15)
plt.tick_params(labelsize=15)
plt.show()
There is a large bistable region for $-1 < b_1 < 2$, approximately. The solutions are in steady-state. Compare with the stability diagram in Figure 17.7(b).
# 17.4: Bifurcation Diagrams for a Two-Neuron Module.
# Periodic and quasiperiodic behaviour for w12=-5.
# See Figure 17.7(b). Here a=alpha and b=beta.
from matplotlib import pyplot as plt
import numpy as np
# Parameters for part (ii). Change parameters to get bifurcation
# diagrams for parts (i), (iii), and (iv).
b2, w11, w21, w12, a , b = -1 , 1.5 , 5 , -5 , 1 , 0.1
start, max = -5, 10
half_N = 1999
N = 2 * half_N + 1
N1 = 1 + half_N
xs_up, xs_down = [], []
x0, y0 = -2, -8
ns_up = np.arange(half_N)
ns_down = np.arange(N1, N)
# Ramp b1 up
for n in ns_up:
    b1 = start + n*max / half_N
    x = b1 + w11 * np.tanh(a * x0) + w12 * np.tanh(b * y0)
    y = b2 + w21 * np.tanh(a * x0)
    xn = x
    x0, y0 = x, y
    xs_up.append([n, xn])
xs_up = np.array(xs_up)
# Ramp b1 down
for n in ns_down:
    b1 = start + 2*max - n*max / half_N
    x = b1 + w11 * np.tanh(a * x0) + w12 * np.tanh(b * y0)
    y = b2 + w21 * np.tanh(a * x0)
    xn = x
    x0, y0 = x, y
    xs_down.append([N-n, xn])
xs_down = np.array(xs_down)
fig, ax = plt.subplots()
xtick_labels = np.linspace(start, max, 7)
ax.set_xticks([(-start + x) / max * N1 for x in xtick_labels])
ax.set_xticklabels(["{:.1f}".format(xtick) for xtick in xtick_labels])
plt.plot(xs_up[:, 0], xs_up[:, 1], "r.", markersize=0.5)
plt.plot(xs_down[:, 0], xs_down[:,1], "b.", markersize=0.5)
plt.xlabel(r"$b_1$", fontsize=15)
plt.ylabel(r"$x_n$", fontsize=15)
plt.tick_params(labelsize=15)
plt.show()
There is a region in which there is periodic and quasiperiodic behavior, where the solutions are not in steady state. Compare with Figure 17.7(b).
# 18.1: ANN for a logical AND gate.
# Edit this program for the ANN for an OR gate (a hedged sketch follows this listing).
# Run in Google Colab.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import sys
training_data = np.array([[0,0],[0, 1], [1, 0], [1, 1]], 'float32')
target_data = np.array([[0], [0], [0], [1]], 'float32')
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(1, input_dim = 2, activation = 'relu'))
model.add(tf.keras.layers.Dense(1, activation = 'sigmoid'))
model.compile(loss='mean_squared_error',optimizer=tf.keras.optimizers.Adam(0.1),metrics=['accuracy'])
hist = model.fit(training_data, target_data, epochs = 200, verbose = 0)
print(model.predict(training_data).round())
val_loss, val_acc = model.evaluate(training_data, target_data)
print(val_loss, val_acc)
loss_curve = hist.history["loss"]
acc_curve = hist.history["accuracy"]
plt.plot(loss_curve, label='Loss')
plt.plot(acc_curve, label='Accuracy')
plt.xlabel('Epochs')
plt.legend()
plt.show()
[[0.]
 [0.]
 [0.]
 [1.]]
loss: 3.1569e-04 - accuracy: 1.0000
0.0003156886377837509 1.0
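The comment at the top of this listing asks for the OR-gate version; under the assumption that only the targets need to change, the hedged sketch below reuses the same architecture and training settings with the OR truth table.
# Hedged sketch: the OR-gate variant (only target_data changes).
import numpy as np
import tensorflow as tf
training_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], 'float32')
target_data = np.array([[0], [1], [1], [1]], 'float32')   # OR truth table
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(1, input_dim=2, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(loss='mean_squared_error',
              optimizer=tf.keras.optimizers.Adam(0.1), metrics=['accuracy'])
model.fit(training_data, target_data, epochs=200, verbose=0)
print(model.predict(training_data).round())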
# 18.2: Activation functions and their derivatives.
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-10, 10, 1000)
y = np.maximum(0, x)
plt.figure(figsize=(15, 10))
plt.subplot(2, 2, 1)
plt.plot(x, y, label = "ReLU")
plt.plot(x, np.piecewise(x, [x <=0 , x > 0], [0, 1]), label = "d ReLU")
plt.xlabel("v")
plt.ylabel("ReLU(v)")
plt.legend()
plt.subplot(2, 2, 2)
plt.plot(x, np.where(x <= 0, 0.1 * x, x), label = "Leaky ReLU")
plt.plot(x, np.where(x <= 0, 0.1, 1.0), label = "d Leaky ReLU")
plt.xlabel("v")
plt.ylabel("Leaky ReLU(v)")
plt.legend()
plt.subplot(2, 2, 3)
y = 1 / (1 + np.exp(-x) )
plt.plot(x, y,label = r"$\sigma=\frac{1}{1+e^{-v}}$")
plt.plot(x, y * (1 - y), label = r"$d \sigma = \sigma(v) (1 - \sigma(v))$" )
plt.legend()
plt.subplot(2, 2, 4)
y = ( 2 / (1 + np.exp(-2*x) ) ) -1
plt.plot(x,y,label = r"$\phi=\tanh(v)$")
plt.plot(x, 1 - y**2, label = r"$d \phi=1 - \tanh^2(v)$")
plt.legend()
plt.show()
# 18.3: TensorFlow program for the XOR gate.
# Using the ANN in Figure 17.4(a).
# Run in Google Colab.
import tensorflow as tf
import numpy as np
x = np.array([[0, 0],
[1, 0],
[0, 1],
[1, 1]], dtype=np.float32)
y = np.array([[0],
[1],
[1],
[0]], dtype=np.float32)
model = tf.keras.models.Sequential()
model.add(tf.keras.Input(shape=(2,)))
model.add(tf.keras.layers.Dense(2, activation=tf.keras.activations.sigmoid))
model.add(tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.1),
loss=tf.keras.losses.MeanSquaredError(),
metrics=["accuracy"])
history = model.fit(x, y, batch_size=1, epochs=500,verbose=0)
model.summary()
print("Tensorflow version: ", tf.__version__)
predictions = model.predict_on_batch(x)
print(predictions)
# Print out the weights of the trained ANN.
for layer in model.layers:
print(layer.get_weights())
val_loss, val_acc = model.evaluate(x, y)
print(val_loss, val_acc)
Model: "sequential_51"
_________________________________________________________________
 Layer (type)                Output Shape              Param #
=================================================================
 dense_107 (Dense)           (None, 2)                 6
 dense_108 (Dense)           (None, 1)                 3
=================================================================
Total params: 9
Trainable params: 9
Non-trainable params: 0
_________________________________________________________________
Tensorflow version:  2.8.2
[[0.02078554]
 [0.9638014 ]
 [0.9637823 ]
 [0.03449851]]
[array([[ 5.7362804, -9.947191 ],
       [ 5.73968  , -9.977996 ]], dtype=float32), array([-8.82812  ,  3.8244238], dtype=float32)]
[array([[-7.4461546],
       [-7.636623 ]], dtype=float32), array([3.6220682], dtype=float32)]
0.0010610617464408278 1.0
18.4 Read the Keras web pages: https://keras.io
Important: For Exercise 19.2, if you wish to open an interactive window in Spyder so that you can rotate the 3-D plot, type the following in the Console Window:
In[n]: matplotlib qt5
# 19.1: Discrete Hopfield Network.
import matplotlib.pyplot as plt
import numpy as np
import random
nb_patterns = 5
pattern_width = 9
pattern_height = 9
max_iterations = 81
# Initialize the patterns
X = np.zeros((nb_patterns, pattern_width * pattern_height))
X[0] = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1,
1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1,
1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
X[1] = [-1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1,
-1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1,
1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1]
X[2] = [-1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1,
-1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1,
-1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1]
X[3] = [-1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1,
1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1,
-1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1]
X[4] = [1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1,
-1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1,
-1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1]
# Show the patterns
fig, ax = plt.subplots(1, nb_patterns, figsize=(10, 5))
for i in range(nb_patterns):
    ax[i].matshow(X[i].reshape((pattern_height, pattern_width)), cmap='gray')
    ax[i].set_xticks([])
    ax[i].set_yticks([])
plt.show()
# The weight matrix.
W = ((np.outer(X[0],X[0])+np.outer(X[1],X[1])+np.outer(X[2],X[2])+np.outer(X[3],X[3])+np.outer(X[4],X[4]))-5*np.identity(81))/81
# hsgn function.
def hsgn(v, x):
    if v > 0:
        return 1
    elif v == 0:
        return x
    else:
        return -1
# Create a corrupted test pattern
noiselevel = 1/3
values = list(range(nb_patterns))
patInd = random.choice(values)
Y = np.array(X[patInd])
x_test = np.array((2*(np.random.rand(81, 1).flatten() > noiselevel)-1)*Y)
x_test.flatten()
print('Pattern index=',patInd)
# Recover the original patterns
A = x_test.copy()
A.flatten()
n=np.random.permutation(81)
for _ in range(max_iterations):
    for j in range(81):
        A[n[j]] = hsgn(np.dot(W[n[j]], A), A[n[j]])
# Show corrupted and recovered patterns
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].matshow(x_test.reshape(pattern_height, pattern_width), cmap='gray')
ax[0].set_title('Corrupted pattern')
ax[0].set_xticks([])
ax[0].set_yticks([])
ax[1].matshow(A.reshape(pattern_height, pattern_width), cmap='gray')
ax[1].set_title('Recovered pattern')
ax[1].set_xticks([])
ax[1].set_yticks([])
plt.show()
Pattern index= 4
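A quick hedged check, reusing W, x_test and A from the listing above: asynchronous updates of a Hopfield network with a zero-diagonal weight matrix should not increase the energy E(a) = -a^T W a / 2, so the recovered pattern should have energy no higher than the corrupted one.
# Hedged check: Hopfield energy of the corrupted and recovered patterns.
def hopfield_energy(a, W):
    return -0.5 * a @ W @ a
print("Energy of corrupted pattern:", hopfield_energy(x_test, W))
print("Energy of recovered pattern:", hopfield_energy(A, W))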
# 19.2: Surface and contour plot of Hopfield Lyapunov function.
# In this case, there are two stable states (local minima).
# For an interactive plot in Spyder, type: matplotlib qt5 in the
# Console Window. You can then rotate the 3-D image.
import numpy as np
import matplotlib.pyplot as plt
gamma = 0.5
xmax , zmax = 1 , 6
def Lyapunov(x, y):
    return -(7*x**2 + 12*x*y - 2*y**2)/2 - (4/(gamma*np.pi**2)) * (np.log(np.cos(np.pi*x/2))
                                                                   + np.log(np.cos(np.pi*y/2)))
y = np.linspace(-xmax,xmax,60)
x = np.linspace(-xmax,xmax,60)
X , Y = np.meshgrid(x,y)
Z = Lyapunov(X,Y).T
fig=plt.figure(figsize=(20,10))
plt.subplot(1,2,1)
ax=fig.add_subplot(1,2,1,projection='3d')
p=ax.plot_wireframe(X, Y, Z, rstride=3, cstride=3)
ax.plot_surface(X,Y,Z,rstride=3,cstride=3,alpha=0.1)
# Use Spyder to rotate the 3D plot.
ax.view_init(16, 141)
ax.set_xlim3d(-xmax, xmax)
ax.set_ylim3d(-xmax, xmax)
ax.set_zlim3d(-zmax, zmax)
ax.set_xlabel("$a_1$",fontsize=15)
ax.set_ylabel("$a_2$",fontsize=15)
ax.set_zlabel("$V$",fontsize=15)
plt.tick_params(labelsize=15)
plt.subplot(1,2,2)
plt.contour(X,Y,Z,[-4,-3,-2,-1,0,1,2,3,4])
plt.xlabel(r'$a_1$',fontsize=15)
plt.ylabel(r'$a_2$',fontsize=15)
plt.tick_params(labelsize=15)
plt.show()
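As a hedged numerical check of the claim that there are two stable states, one can run a bounded minimizer from two starting corners; the starting points and the bounds keeping (a1, a2) inside (-1, 1)^2 are arbitrary choices, not values from the text.
# Hedged check: locate the two local minima of the Lyapunov function defined above.
from scipy.optimize import minimize
for guess in ([0.8, 0.8], [-0.8, -0.8]):
    res = minimize(lambda p: Lyapunov(p[0], p[1]), guess,
                   bounds=[(-0.99, 0.99), (-0.99, 0.99)])
    print("Minimum near", np.round(res.x, 3), " V =", round(float(res.fun), 3))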
# 19.3: Predicting chaos in the Lorenz continuous map.
# Run in Google Colab.
import tensorflow as tf
from tensorflow import keras
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import pandas as pd
# Lorenz parameters and initial conditions.
sigma, beta, rho = 10, 2.667, 28
x0, y0, z0 = 0, 1, 1.05
# Maximum time point and total number of iterations.
tmax, n = 100, 1000
def Lorenz(X, t, sigma, beta, rho):
    """The Lorenz equations."""
    x, y, z = X
    dx = -sigma*(x - y)
    dy = rho*x - y - x*z
    dz = -beta*z + x*y
    return dx, dy, dz
# Integrate the Lorenz equations on the time grid t.
t = np.linspace(0, tmax, n)
f = odeint(Lorenz, (x0, y0, z0), t, args=(sigma, beta, rho))
x, y, z = f.T
fig=plt.figure(figsize=(20,10))
plt.plot(t,x,'b-',lw=0.5)
plt.ylabel('x(t)')
plt.xlabel('Time')
plt.show()
Lorenz_chaos = x
df = pd.DataFrame(dict(Lorenz_chaos=Lorenz_chaos), columns=["Lorenz_chaos"])
df.head()
train_size = int(len(df) * 0.8)
test_size = len(df) - train_size
train, test = df.iloc[0:train_size], df.iloc[train_size:len(df)]
print(len(train), len(test))
def create_dataset(X, y, time_steps=1):
    Xs, ys = [], []
    for i in range(len(X) - time_steps):
        v = X.iloc[i:(i + time_steps)].values
        Xs.append(v)
        ys.append(y.iloc[i + time_steps])
    return np.array(Xs), np.array(ys)
time_steps = 10
# Reshape to [samples, time_steps, n_features]
X_train, y_train = create_dataset(train, train.Lorenz_chaos, time_steps)
X_test, y_test = create_dataset(test, test.Lorenz_chaos, time_steps)
print(X_train.shape, y_train.shape)
model = keras.Sequential()
model.add(keras.layers.LSTM(128, input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(keras.layers.Dense(1))
model.compile(loss='mean_squared_error', optimizer=keras.optimizers.Adam(0.01))
history = model.fit(X_train, y_train, epochs=30, batch_size=16,
validation_split=0.2, verbose=0, shuffle=False)
fig=plt.figure(figsize=(20,10))
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.ylabel('Loss')
plt.xlabel('Epochs')
fig=plt.figure(figsize=(20,10))
y_pred = model.predict(X_test)
plt.plot(np.arange(0, len(y_train)), y_train, 'g', label="history")
plt.plot(np.arange(len(y_train), len(y_train) + len(y_test)), y_test, marker='.', label="true")
plt.plot(np.arange(len(y_train), len(y_train) + len(y_test)), y_pred, 'r', label="prediction")
plt.ylabel('Value')
plt.xlabel('Time Step')
plt.legend()
plt.show()
fig=plt.figure(figsize=(20,10))
plt.plot(y_test, marker='.', label="true")
plt.plot(y_pred, 'r', label="prediction")
plt.ylabel('Value')
plt.xlabel('Time Step')
plt.legend()
plt.show()
800 200
(790, 10, 1) (790,)
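The plots above show one-step-ahead predictions, where each prediction is made from ten true values. A tougher test of predicting chaos is closed-loop forecasting, in which the model is fed its own outputs; the hedged sketch below assumes model, X_test and y_test from the listing above, and the 100-step horizon is an arbitrary choice.
# Hedged sketch: closed-loop (recursive) forecasting with the trained LSTM above.
import numpy as np
import matplotlib.pyplot as plt
window = X_test[0].copy()                       # shape (time_steps, 1)
closed_loop = []
for _ in range(100):
    yhat = model.predict(window[np.newaxis, :, :], verbose=0)[0, 0]
    closed_loop.append(yhat)
    window = np.vstack([window[1:], [[yhat]]])  # slide the window forward
plt.plot(y_test[:100], label="true")
plt.plot(closed_loop, "r", label="closed-loop prediction")
plt.xlabel("Time Step")
plt.legend()
plt.show()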
# 19.4: Using LSTM to predict future time series of Google Finance.
# Importing the relevant libraries.
# Run in Google Colab.
import pandas as pd
pd.options.display.max_colwidth = 60
import tensorflow as tf
from tensorflow import keras
import numpy as np
from numpy import mean
import math
import statistics
import pylab
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
# Load data into Pandas data frame.
# In Google Colab, drag the file into the folder on the left.
dataframe = pd.read_csv("GOOG.csv")
df = dataframe
df = df[["Date" , "Close"]]
df.head()
# Normalize the data.
df_min_max_scaled = df.copy()
column = "Close"
df_min_max_scaled[column] = (df_min_max_scaled[column] - df_min_max_scaled[column].min()) \
/ (df_min_max_scaled[column].max() - df_min_max_scaled[column].min())
# View normalized data.
df_min_max_scaled.head()
df_min_max_scaled = df_min_max_scaled.set_index('Date')
df_min_max_scaled.head()
DATA = df_min_max_scaled
# We'll split training and test data as 80-20, respectively.
train_size= int(len(DATA)*0.8)
test_size= len(DATA) - train_size
# Splitting train and test data then printing the size (rows) of each.
train, test = DATA[0:train_size], DATA[train_size:len(DATA)]
print(len(train),len(test))
# Function:create_dataset.
# Converts data into numpy arrays.
def create_dataset(X, y, time_steps=1):
    Xs, ys = [], []
    for i in range(len(X) - time_steps):
        v = X.iloc[i:(i + time_steps)].values
        Xs.append(v)
        ys.append(y.iloc[i + time_steps])
    return np.array(Xs), np.array(ys)
time_steps = 1
# Split data into X_train and y_train datasets.
# (train.Close and test.Close extract the data from the train and test dataframes).
X_train, y_train = create_dataset(train, train.Close, time_steps)
# Splitting test data into X_test and y_test datasets.
X_test, y_test = create_dataset(test, test.Close, time_steps)
print(X_train.shape, y_train.shape)
# Defining the LSTM network architecture.
model=keras.Sequential( )
model.add(keras.layers.LSTM(128,input_shape=(X_train.shape[1],X_train.shape[2])))
model.add(keras.layers.Dense(1))
# Compile the model.
model.compile(loss='mean_squared_error',optimizer=keras.optimizers.Adam(0.01))
# Train the model (we use 500 epochs).
history= model.fit(X_train, y_train, epochs=500, batch_size=16,validation_split= 0.2,verbose=0,shuffle=False)
plt.plot(history.history['loss'], label= 'train')
plt.plot(history.history['val_loss'], label= 'test')
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.legend()
# Get the models predicted price values.
y_pred=model.predict(X_test)
# Plot the predictions along with the true outcomes
fig=plt.figure(figsize=(20,10))
plt.plot(np.arange(0,len(y_train)), y_train, 'g',label="history")
plt.plot(np.arange(len(y_train), len(y_train)+ len(y_test)), y_test, marker='.',label="true")
plt.plot(np.arange(len(y_train), len(y_train)+ len(y_test)), y_pred, 'r', label="prediction")
plt.ylabel('Price')
plt.xlabel('Time Step')
plt.legend()
plt.show()
fig=plt.figure(figsize=(20,10))
plt.plot(y_test, marker='.', label="true")
plt.plot(y_pred, 'r', label="prediction")
plt.ylabel('Value')
plt.xlabel('Time Step')
plt.legend()
plt.show()
201 51
(200, 1, 1) (200,)
# 20.1: Convolution.
import numpy as np
# Input padded array.
A = np.array([[0,0,0,0,0,0,0,0,0],
[0,0,0.9,0.8,0.8,0.9,0.3,0.1,0],
[0,0.2,0.6,0.1,0,0,0.1,0.2,0],
[0,0.3,0.4,0,0,0,0,0,0],
[0,0,0,0.3,0.5,0.6,0.2,0,0],
[0,0,0,0,0,0.1,0.5,0,0],
[0,0,0,0,0,0.2,0.7,0.4,0],
[0,0,0.1,0.6,0.3,0.4,0.3,0,0],
[0,0,0,0,0,0,0,0,0]])
# The filter (kernel).
K = np.array([[-1,-1,-1,-1,-1],
[-1,2,2,2,2],
[-1,2,-1,-1,-1],
[-1,2,-1,0,0],
[-1,2,-1,0,0]])
C = np.zeros([5,5])
for i in range(5):
    for j in range(5):
        C[j, i] = np.sum(A[j:5+j, i:5+i] * K)
print(C)
[[ 4.9  7.9  3.9  3.1  2. ]
 [-0.5 -2.  -4.2 -2.4 -1.3]
 [-0.3 -1.8 -1.9 -0.8 -0.4]
 [ 0.8  1.6  3.   1.1  0.7]
 [-0.9 -1.8 -0.5 -2.1 -0.6]]
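The nested loop above computes a "valid" cross-correlation of the padded image with the 5x5 kernel, which is what deep-learning libraries usually call convolution (the kernel is not flipped). As a hedged cross-check, scipy reproduces the same 5x5 feature map.
# Hedged cross-check of the loop above using scipy.
from scipy.signal import correlate2d
C_scipy = correlate2d(A, K, mode='valid')
print(np.allclose(C, C_scipy))   # expected: True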
# 20.3: TensorBoard on Boston housing overfitting example.
# Run in Google Colab.
# The TensorBoard output will show in Google Colab.
try:
    %tensorflow_version 2.x
except Exception:
    pass
# Load the TensorBoard notebook extension
%load_ext tensorboard
import tensorflow as tf
from tensorflow import keras
import datetime
# Clear any logs from previous runs
!rm -rf ./logs/
# Load and normalize the Boston Housing data.
from keras.datasets import boston_housing
(x_train, y_train), (x_test, y_test) = boston_housing.load_data(path='boston_housing.npz',test_split=0,seed=113)
# Normalize the data.
ft_wise_mean = x_train.mean(axis=0) # Feature-wise mean.
x_train -= ft_wise_mean
ft_wise_std = x_train.std(axis=0) # Feature-wise std.
x_train /= ft_wise_std
x_test -= ft_wise_mean
x_test /= ft_wise_std
model = keras.Sequential([
keras.layers.Dense(100, input_dim=13, kernel_initializer='normal', activation='relu'),
keras.layers.Dense(100, kernel_initializer='normal', activation='relu'),
keras.layers.Dense(1, input_dim=13, kernel_initializer='normal')
])
num_epochs = 16
model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(0.01))
hist=model.fit(x_train, y_train, epochs=num_epochs, validation_split=0.9, verbose=0)
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# Define the TensorBoard callback used below.
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model.fit(x=x_train, y=y_train, epochs=num_epochs, validation_data=(x_test, y_test), verbose=0, callbacks=[tensorboard_callback])
%tensorboard --logdir=logs
Reusing TensorBoard on port 6006 (pid 1004), started 1:24:15 ago. (Use '!kill 1004' to kill it.)
!kill 1004
Solution 20.4. Run in Google Colab.
I hope you have enjoyed the book. I am now working on the next edition, which will be entitled "Python for Scientific Computing, AI and Cyber Security."