# -*- coding: utf-8 -*-
"""gradcam-tf-explain-catdog.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/PracticalDL/Practical-Deep-Learning-Book/blob/master/code/chapter-5/3-tf-explain.ipynb
# `tf-explain`
tf-explain (by Raphael Meudec) helps us understand the results and inner workings of a neural network with the help of visualizations, lifting the veil on bias in our datasets. A few different visualization approaches are available with tf-explain:
- Grad-CAM: Gradient-weighted Class Activation Mapping visualizes how different parts of the image affect the neural network's output by looking at its activation maps. A heatmap is generated from the gradients of the target object ID with respect to the last convolutional layer. Grad-CAM is largely a broad-spectrum heatmap generator, as it is robust to noise and can be used with a wide array of CNN models. (A simplified sketch of this computation appears right after the tf-explain imports below.)
- Activations: Visualize the activations of the selected convolutional layers.
- Occlusion Sensitivity: Occludes a part of the image (using a small square patch moved across it) to figure out how robust the network is. If the prediction is still correct on average, the network is robust. The area of the image that turns out warmest (i.e., red) has the most effect on the prediction when occluded.
In this notebook, we will produce these different visualizations on a couple of sample images.
Note: After executing the first cell, you may have to `RESTART RUNTIME` if you are running on Google Colab.
"""
# Perform all installations
!pip install tensorflow-gpu==2.0.0
!pip install tf-explain==0.1.0
# Commented out IPython magic to ensure Python compatibility.
import tensorflow as tf
import numpy as np
#Get TensorBoard to run
# %load_ext tensorboard
"""We can add multiple types of callbacks while training or use its core API to generate TensorFlow events that can later be loaded into TensorBoard."""
from tf_explain.core.grad_cam import GradCAM
from tf_explain.core.occlusion_sensitivity import OcclusionSensitivity
from tf_explain.core.activations import ExtractActivations
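"""To build intuition for the Grad-CAM visualization described above, here is a simplified manual sketch of the computation: take the gradients of the target class score with respect to a convolutional layer's feature maps, average them per channel to obtain weights, and combine the feature maps with those weights into a heatmap. This is only an illustrative sketch, not tf-explain's exact implementation; `manual_grad_cam` is a hypothetical helper defined here purely for explanation."""

import numpy as np
import tensorflow as tf

def manual_grad_cam(model, image, class_index, conv_layer_name):
    """Return a heatmap in [0, 1] for `class_index`, given one preprocessed image (H, W, C)."""
    # Sub-model that maps the input image to (conv feature maps, predictions)
    grad_model = tf.keras.Model(
        inputs=model.inputs,
        outputs=[model.get_layer(conv_layer_name).output, model.output])
    with tf.GradientTape() as tape:
        conv_output, predictions = grad_model(np.expand_dims(image, axis=0).astype(np.float32))
        class_score = predictions[:, class_index]
    # Gradients of the class score with respect to the conv feature maps
    grads = tape.gradient(class_score, conv_output)
    # Global-average-pool the gradients: one weight per feature-map channel
    weights = tf.reduce_mean(grads, axis=(1, 2))
    # Weighted combination of the feature maps, followed by ReLU
    cam = tf.nn.relu(
        tf.reduce_sum(conv_output * weights[:, tf.newaxis, tf.newaxis, :], axis=-1))[0]
    # Normalize to [0, 1] so it can be rendered as a heatmap
    cam = (cam - tf.reduce_min(cam)) / (tf.reduce_max(cam) - tf.reduce_min(cam) + 1e-8)
    return cam.numpy()

# Example (hypothetical call, once a preprocessed image is available):
# manual_grad_cam(tf.keras.applications.vgg16.VGG16(weights='imagenet'),
#                 preprocessed_img, 263, 'block5_conv3')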
"""For inference, all we need to do is pass an image, its ImageNet object ID, and a model into tf-explain's functions. The object ID is needed because tf-explain has to figure out what is activated for that particular class."""
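"""If you do not know the ImageNet object ID for an image, it can be looked up by running the classifier and taking the top prediction, as in the sketch below. The helper name `top_imagenet_class` is our own, defined only for illustration; it is not part of tf-explain."""

import numpy as np
import tensorflow as tf

def top_imagenet_class(model, image_path, preprocess, target_size=(224, 224)):
    """Return (class_index, human-readable label) of the model's top prediction."""
    img = tf.keras.preprocessing.image.load_img(image_path, target_size=target_size)
    batch = np.expand_dims(tf.keras.preprocessing.image.img_to_array(img), axis=0)
    preds = model.predict(preprocess(batch))
    class_index = int(np.argmax(preds[0]))
    label = tf.keras.applications.imagenet_utils.decode_predictions(preds, top=1)[0][0][1]
    return class_index, label

# Example (run after the sample images are downloaded below):
# top_imagenet_class(tf.keras.applications.vgg16.VGG16(weights='imagenet'),
#                    'dog.jpg', tf.keras.applications.vgg16.preprocess_input)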
# Detect whether we are running on Google Colab
try:
    import google.colab
    IS_COLAB_ENV = True
except ImportError:
    IS_COLAB_ENV = False

def download_sample_image(filename):
    import requests
    url = f'https://raw.githubusercontent.com/PracticalDL/Practical-Deep-Learning-Book/master/sample-images/{filename}'
    open(filename, 'wb').write(requests.get(url).content)

# On Colab, download the sample images; otherwise use the local copies from the repository
if IS_COLAB_ENV:
    IMAGE_PATHS = ['dog.jpg', 'cat.jpg']
    for each_filename in IMAGE_PATHS:
        download_sample_image(each_filename)
else:
    IMAGE_PATHS = ['../../sample-images/dog.jpg', '../../sample-images/cat.jpg']
# Get a summary of the VGG16 network
model = tf.keras.applications.vgg16.VGG16(weights='imagenet',
                                          include_top=True)
model.summary()

# Get a summary of the MobileNet network
model = tf.keras.applications.mobilenet.MobileNet(weights='imagenet',
                                                  include_top=True)
model.summary()

# Get a summary of the ResNet50 network
model = tf.keras.applications.resnet50.ResNet50(weights='imagenet',
                                                include_top=True)
model.summary()
# ImageNet class IDs corresponding to the two sample images (dog.jpg, cat.jpg)
indices = [263, 281]
layers_name = ['activation_6']

for i in range(len(IMAGE_PATHS)):
    each_path = IMAGE_PATHS[i]
    index = indices[i]
    img = tf.keras.preprocessing.image.load_img(each_path,
                                                target_size=(224, 224))
    img = tf.keras.preprocessing.image.img_to_array(img)
    data = ([img], None)
    # Base name used when saving the results
    name = each_path.split("/")[-1].split(".jpg")[0]
    # ----------------------------------------------
    # VGG16 classifier CNN model
    # Save the Grad-CAM visualization, focused on layer block5_conv3
    explainer = GradCAM()
    model = tf.keras.applications.vgg16.VGG16(weights='imagenet',
                                              include_top=True)
    grid = explainer.explain(data, model, 'block5_conv3', index)
    explainer.save(grid, '.', name + 'grad_cam_VGG16.png')
    # Compute Occlusion Sensitivity for patch_size 20
    explainer = OcclusionSensitivity()
    grid = explainer.explain(data, model, index, 20)
    explainer.save(grid, '.', name + 'occlusion_sensitivity_20_VGG16.png')
    # Compute Occlusion Sensitivity for patch_size 10
    grid = explainer.explain(data, model, index, 10)
    explainer.save(grid, '.', name + 'occlusion_sensitivity_10_VGG16.png')
    # Save the Activations visualization
    data = (np.array([img]), None)
    explainer = ExtractActivations()
    # VGG16 has no layer named 'conv1'; its first convolutional layer is 'block1_conv1'
    grid = explainer.explain(data, model, ['block1_conv1'])
    explainer.save(grid, '.', name + 'activations_VGG16.png')
    # ---------------------------------------
    # ResNet50 classifier CNN model
    # Save the Occlusion Sensitivity visualizations
    explainer = OcclusionSensitivity()
    model = tf.keras.applications.resnet50.ResNet50(weights='imagenet',
                                                    include_top=True)
    # Compute Occlusion Sensitivity for patch_size 20
    grid = explainer.explain(data, model, index, 20)
    explainer.save(grid, '.', name + 'occlusion_sensitivity_20_ResNet50.png')
    # Compute Occlusion Sensitivity for patch_size 10
    grid = explainer.explain(data, model, index, 10)
    explainer.save(grid, '.', name + 'occlusion_sensitivity_10_ResNet50.png')
    # Save the Grad-CAM visualization, focused on layer conv5_block3_3_conv in ResNet50
    explainer = GradCAM()
    grid = explainer.explain(data, model, 'conv5_block3_3_conv', index)
    explainer.save(grid, '.', name + 'grad_cam_ResNet50.png')
    # Save the Activations visualization
    data = (np.array([img]), None)
    explainer = ExtractActivations()
    grid = explainer.explain(data, model, ['conv1'])
    explainer.save(grid, '.', name + 'activations_ResNet50.png')
    # -----------------------------------------------
    # MobileNet classifier CNN model
    # Save the Activations visualization for the first convolutional layer ('conv1')
    data = (np.array([img]), None)
    explainer = ExtractActivations()
    model = tf.keras.applications.mobilenet.MobileNet(weights='imagenet',
                                                      include_top=True)
    grid = explainer.explain(data, model, ['conv1'])
    explainer.save(grid, '.', name + 'activations_MobileNet.png')
    print("MOBILENET")
    model.summary()
    # Save the Grad-CAM visualization, focused on layer conv_pw_13 in MobileNet
    explainer = GradCAM()
    grid = explainer.explain(data, model, 'conv_pw_13', index)
    explainer.save(grid, '.', name + 'grad_cam_MobileNet.png')
    # Compute Occlusion Sensitivity for patch_size 20
    explainer = OcclusionSensitivity()
    grid = explainer.explain(data, model, index, 20)
    explainer.save(grid, '.', name + 'occlusion_sensitivity_20_MobileNet.png')
    # Compute Occlusion Sensitivity for patch_size 10
    grid = explainer.explain(data, model, index, 10)
    explainer.save(grid, '.', name + 'occlusion_sensitivity_10_MobileNet.png')
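"""All of the explainers above save their visualizations as PNG files in the current directory. To view them inline in the notebook, a small convenience loop such as the following can be used; it simply globs the generated files and displays them with IPython."""

import glob
from IPython.display import Image, display

# Show every visualization produced for the three models
for png_path in sorted(glob.glob('*_VGG16.png') + glob.glob('*_ResNet50.png') + glob.glob('*_MobileNet.png')):
    print(png_path)
    display(Image(filename=png_path))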