
Creating Class Activation Map And Saliency Maps Using VGG16 In Machine Learning

Importing the libraries

import vis                                        # keras-vis: gradient-based visualization utilities
import keras
from vis.visualization import visualize_saliency  # saliency-map helper from keras-vis
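
These imports rely on the third-party keras-vis package (imported as vis), which is not bundled with Keras. Assuming a notebook environment, a typical way to install it is shown below; installing straight from the project's GitHub repository is often preferred to get the latest fixes.

!pip install keras-vis
# or, assuming git is available:
!pip install git+https://github.com/raghakot/keras-vis.git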

Setting the image paths

loc = "./TEST.jpeg"
loc1 = "./TEST1.jpeg"
loc2 = "./TEST3.jpg"

Download VGG16

# step 1: download the pretrained VGG16 model with ImageNet weights
from keras.applications.vgg16 import VGG16
from keras.utils.vis_utils import plot_model
from keras.preprocessing.image import load_img

model = VGG16(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
)

# VGG16 architecture summary
model.summary()

Output:

Model: "vgg16"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
input_1 (InputLayer)         (None, 224, 224, 3)       0
_________________________________________________________________
block1_conv1 (Conv2D)        (None, 224, 224, 64)      1792
_________________________________________________________________
block1_conv2 (Conv2D)        (None, 224, 224, 64)      36928
_________________________________________________________________
block1_pool (MaxPooling2D)   (None, 112, 112, 64)      0
_________________________________________________________________
block2_conv1 (Conv2D)        (None, 112, 112, 128)     73856
_________________________________________________________________
block2_conv2 (Conv2D)        (None, 112, 112, 128)     147584
_________________________________________________________________
block2_pool (MaxPooling2D)   (None, 56, 56, 128)       0
_________________________________________________________________
block3_conv1 (Conv2D)        (None, 56, 56, 256)       295168
_________________________________________________________________
block3_conv2 (Conv2D)        (None, 56, 56, 256)       590080
_________________________________________________________________
block3_conv3 (Conv2D)        (None, 56, 56, 256)       590080
_________________________________________________________________
block3_pool (MaxPooling2D)   (None, 28, 28, 256)       0
_________________________________________________________________
block4_conv1 (Conv2D)        (None, 28, 28, 512)       1180160
_________________________________________________________________
block4_conv2 (Conv2D)        (None, 28, 28, 512)       2359808
_________________________________________________________________
block4_conv3 (Conv2D)        (None, 28, 28, 512)       2359808
_________________________________________________________________
block4_pool (MaxPooling2D)   (None, 14, 14, 512)       0
_________________________________________________________________
block5_conv1 (Conv2D)        (None, 14, 14, 512)       2359808
_________________________________________________________________
block5_conv2 (Conv2D)        (None, 14, 14, 512)       2359808
_________________________________________________________________
block5_conv3 (Conv2D)        (None, 14, 14, 512)       2359808
_________________________________________________________________
block5_pool (MaxPooling2D)   (None, 7, 7, 512)         0
_________________________________________________________________
flatten (Flatten)            (None, 25088)             0
_________________________________________________________________
fc1 (Dense)                  (None, 4096)              102764544
_________________________________________________________________
fc2 (Dense)                  (None, 4096)              16781312
_________________________________________________________________
predictions (Dense)          (None, 1000)              4097000
=================================================================
Total params: 138,357,544
Trainable params: 138,357,544
Non-trainable params: 0
_________________________________________________________________



# plot_model needs the pydot and graphviz packages installed to render the diagram
plot_model(model, to_file='vgg.png')

Output:

[VGG16 architecture diagram, written to vgg.png]
# loading multiple images
image = load_img(loc, target_size=(224, 224))
image1 = load_img(loc1, target_size=(224, 224))
image2 = load_img(loc2, target_size=(224, 224))
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input

# convert the image pixels to a numpy array
image = img_to_array(image)
image1 = img_to_array(image1)
image2 = img_to_array(image2)

# add a batch dimension so each array has shape (1, 224, 224, 3)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
image1 = image1.reshape((1, image1.shape[0], image1.shape[1], image1.shape[2]))
image2 = image2.reshape((1, image2.shape[0], image2.shape[1], image2.shape[2]))
# prepare the images for VGG16 (preprocess_input converts RGB to BGR and subtracts the ImageNet channel means)
image = preprocess_input(image)
image1 = preprocess_input(image1)
image2 = preprocess_input(image2)
#making predictions
yhat = model.predict(image)
yhat1 = model.predict(image1)
yhat2 = model.predict(image2)
from keras.applications.vgg16 import decode_predictions
# convert the probabilities to class labels
label = decode_predictions(yhat)
label1 = decode_predictions(yhat1)
label2 = decode_predictions(yhat2)
# retrieve the most likely result, e.g. highest probability
label = label[0][0]
label1 = label1[0][0]
label2 = label2[0][0]
# print the classification
print('%s (%.2f%%)' % (label[1], label[2]*100))
print('%s (%.2f%%)' % (label1[1], label1[2]*100))
print('%s (%.2f%%)' % (label2[1], label2[2]*100))

Output:

Egyptian_cat (67.73%)
French_bulldog (31.71%)
vulture (99.98%)
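
For reference, decode_predictions returns one list per input image, where each entry is a (class_id, class_name, probability) tuple sorted by descending probability, so label[0][0] above is simply the most likely entry for the first image. A minimal sketch of printing the full list for the first image:

# print every (class_id, class_name, probability) tuple for the first image
for class_id, class_name, prob in decode_predictions(yhat, top=5)[0]:
    print('%s: %.4f' % (class_name, prob))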


!wget "https://raw.githubusercontent.com/raghakot/keras-vis/master/resources/imagenet_class_index.json"

# parse the ImageNet class index into a flat list of labels
import json
CLASS_INDEX = json.load(open("imagenet_class_index.json"))
classlabel = []
for i_dict in range(len(CLASS_INDEX)):
    classlabel.append(CLASS_INDEX[str(i_dict)][1])
print("N of class={}".format(len(classlabel)))

Output:

N of class=1000
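
Each entry in imagenet_class_index.json maps a string index to a [wordnet_id, human_readable_label] pair, and the loop above keeps only the label. As a quick sanity check (assuming the standard Keras class-index file, index 285 should correspond to Egyptian_cat, matching the first prediction above):

# index 285 should map to something like ["n02124075", "Egyptian_cat"]
print(CLASS_INDEX["285"])
print(classlabel[285])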



# display the input images
import matplotlib.pyplot as plt

_img = load_img(loc,target_size=(224,224))
plt.imshow(_img)
plt.show()

_img1 = load_img(loc1,target_size=(224,224))
plt.imshow(_img1)
plt.show()

_img2 = load_img(loc2,target_size=(224,224))
plt.imshow(_img2)
plt.show()

Output:

[The three input images, each resized to 224x224]
Top 5 Predictions

#top 5 predictions
import numpy as np

class_idxs_sorted = np.argsort(yhat.flatten())[::-1]
class_idxs_sorted1 = np.argsort(yhat1.flatten())[::-1]
class_idxs_sorted2 = np.argsort(yhat2.flatten())[::-1]
topNclass         = 5

print('for first image\n')

for i, idx in enumerate(class_idxs_sorted[:topNclass]):
    print("Top {} predicted class:     Pr(Class={:18} [index={}])={:5.3f}".format(
          i + 1,classlabel[idx],idx,yhat[0,idx]))

print('for second image\n')

for i, idx in enumerate(class_idxs_sorted1[:topNclass]):
    print("Top {} predicted class:     Pr(Class={:18} [index={}])={:5.3f}".format(
          i + 1,classlabel[idx],idx,yhat1[0,idx]))

print('for third image\n')


for i, idx in enumerate(class_idxs_sorted2[:topNclass]):
    print("Top {} predicted class:     Pr(Class={:18} [index={}])={:5.3f}".format(
          i + 1,classlabel[idx],idx,yhat2[0,idx]))

Output:

for first image

Top 1 predicted class:     Pr(Class=Egyptian_cat       [index=285])=0.677
Top 2 predicted class:     Pr(Class=tabby              [index=281])=0.213
Top 3 predicted class:     Pr(Class=tiger_cat          [index=282])=0.082
Top 4 predicted class:     Pr(Class=lynx               [index=287])=0.019
Top 5 predicted class:     Pr(Class=Persian_cat        [index=283])=0.004

for second image

Top 1 predicted class:     Pr(Class=French_bulldog     [index=245])=0.317
Top 2 predicted class:     Pr(Class=boxer              [index=242])=0.251
Top 3 predicted class:     Pr(Class=bull_mastiff       [index=243])=0.188
Top 4 predicted class:     Pr(Class=American_Staffordshire_terrier [index=180])=0.149
Top 5 predicted class:     Pr(Class=Staffordshire_bullterrier [index=179])=0.039

for third image

Top 1 predicted class:     Pr(Class=vulture            [index=23])=1.000
Top 2 predicted class:     Pr(Class=hen                [index=8])=0.000
Top 3 predicted class:     Pr(Class=cock               [index=7])=0.000
Top 4 predicted class:     Pr(Class=black_stork        [index=128])=0.000
Top 5 predicted class:     Pr(Class=macaw              [index=88])=0.000



from vis.utils import utils

layer_idx = utils.find_layer_idx(model, 'predictions')
# Swap softmax with linear
model.layers[layer_idx].activation = keras.activations.linear
model = utils.apply_modifications(model)
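
The final softmax squashes the class scores against one another, which tends to produce weak, diffuse gradients, so keras-vis recommends swapping it for a linear activation before computing gradient-based visualizations; apply_modifications then rebuilds and reloads the model so the change actually takes effect. A quick sanity check, using only what is already imported:

# the predictions layer should now report a linear activation function
print(model.layers[layer_idx].activation)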

Creating Saliency Maps

# Saliency Maps
from vis.visualization import visualize_saliency
img               = img_to_array(_img)
img               = preprocess_input(img)
class_idx = class_idxs_sorted[0]
grad_top1 = visualize_saliency(model,
                               layer_idx,
                               filter_indices = class_idx,
                               seed_input     = img[np.newaxis,...])

img1               = img_to_array(_img1)
img1               = preprocess_input(img1)
class_idx1 = class_idxs_sorted1[0]
grad_top11 = visualize_saliency(model,
                               layer_idx,
                               filter_indices = class_idx1,
                               seed_input     = img1[np.newaxis,...])

img2               = img_to_array(_img2)
img2               = preprocess_input(img2)
class_idx2 = class_idxs_sorted2[0]
grad_top12 = visualize_saliency(model,
                               layer_idx,
                               filter_indices = class_idx2,
                               seed_input     = img2[np.newaxis,...])
# visualize the saliency maps: original image on the left, gradient heatmap overlay on the right
def plot_map(grads,_img,yhat,class_idx):
    fig, axes = plt.subplots(1,2,figsize=(14,5))
    axes[0].imshow(_img)
    axes[1].imshow(_img)
    i = axes[1].imshow(grads,cmap="jet",alpha=0.8)
    fig.colorbar(i)
    plt.suptitle("Pr(class={}) = {:5.2f}".format(
                      classlabel[class_idx],
                      yhat[0,class_idx]))
plot_map(grad_top1,_img,yhat,class_idx)
plot_map(grad_top11,_img1,yhat1,class_idx1)
plot_map(grad_top12,_img2,yhat2,class_idx2)

Output:

[For each image: the original on the left and the saliency-map heatmap (jet colormap) overlaid on the right, with a colorbar and the predicted class probability as the title]
Creating Class Activation Maps

# class activation map (CAM)
from vis.visualization import visualize_cam

# use the last convolutional layer of VGG16 as the penultimate layer
penultimate_layer_idx = utils.find_layer_idx(model, "block5_conv3")

class_idx  = class_idxs_sorted[0]
seed_input = img
grad_top1  = visualize_cam(model, layer_idx, class_idx, seed_input,
                           penultimate_layer_idx = penultimate_layer_idx,
                           backprop_modifier     = None,
                           grad_modifier         = None)

seed_input = img1
grad_top11 = visualize_cam(model, layer_idx, class_idx1, seed_input,
                           penultimate_layer_idx = penultimate_layer_idx,
                           backprop_modifier     = None,
                           grad_modifier         = None)

seed_input = img2
grad_top12 = visualize_cam(model, layer_idx, class_idx2, seed_input,
                           penultimate_layer_idx = penultimate_layer_idx,
                           backprop_modifier     = None,
                           grad_modifier         = None)
# visualize the class activation maps with the same layout as the saliency plots
def plot_cam_map(grads,_img,yhat,class_idx):
    fig, axes = plt.subplots(1,2,figsize=(14,5))
    axes[0].imshow(_img)
    axes[1].imshow(_img)
    i = axes[1].imshow(grads,cmap="jet",alpha=0.8)
    fig.colorbar(i)
    plt.suptitle("Pr(class={}) = {:5.2f}".format(
                      classlabel[class_idx],
                      yhat[0,class_idx]))
plot_cam_map(grad_top1,_img,yhat,class_idx)
plot_cam_map(grad_top11,_img1,yhat1,class_idx1)
plot_cam_map(grad_top12,_img2,yhat2,class_idx2)

Output:

[For each image: the original on the left and the class activation map overlaid on the right, highlighting the regions that drive the predicted class]
If you need help with a machine learning visualization assignment or project, or need a solution to the problem above, we are ready to help you.


Send your request to realcode4you@gmail.com and get instant help at an affordable price.

We always focus on delivering unique, plagiarism-free, well-structured code, written by our highly educated professionals within your given time frame.


If you are looking for help with other programming languages such as C, C++, Java, Python, PHP, Asp.Net, NodeJs, or ReactJs, or with databases such as MySQL, MongoDB, SQL Server, or Oracle, then contact us as well.
