# Source code for lecture2notes.models.slide_classifier.grad_cam

# Grad-CAM implementation based on https://github.com/kazuto1011/grad-cam-pytorch
# by Kazuto Nakashima (http://kazuto1011.github.io) which was created on 2017-05-26

import argparse
import os
from collections.abc import Sequence

import cv2
import matplotlib.cm as cm
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from torch.nn import functional as F
from torchvision import transforms
from tqdm import tqdm


class _BaseWrapper:
    def __init__(self, model):
        super(_BaseWrapper, self).__init__()
        self.device = next(model.parameters()).device
        self.model = model
        self.handlers = []  # a list of hook function handles

    def _encode_one_hot(self, ids):
        one_hot = torch.zeros_like(self.logits).to(self.device)
        one_hot.scatter_(1, ids, 1.0)
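        # e.g. with a 4-class model and ids == [[2]], one_hot is now
        # [[0., 0., 1., 0.]], so backward() propagates the gradient of
        # only the selected logit.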
        return one_hot

    def forward(self, image):
        self.image_shape = image.shape[2:]
        self.logits = self.model(image)
        self.probs = F.softmax(self.logits, dim=1)
        return self.probs.sort(dim=1, descending=True)  # ordered results

    def backward(self, ids):
        """
        Class-specific backpropagation
        """
        one_hot = self._encode_one_hot(ids)
        self.model.zero_grad()
        self.logits.backward(gradient=one_hot, retain_graph=True)

    def generate(self):
        raise NotImplementedError

    def remove_hook(self):
        """
        Remove all the forward/backward hook functions
        """
        for handle in self.handlers:
            handle.remove()


class BackPropagation(_BaseWrapper):
    def forward(self, image):
        self.image = image.requires_grad_()
        return super(BackPropagation, self).forward(self.image)

    def generate(self):
        gradient = self.image.grad.clone()
        self.image.grad.zero_()
        return gradient
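

# A minimal usage sketch for the wrapper classes above, assuming `model` is
# any trained classifier and `batch` is a normalized NCHW float tensor on the
# model's device (both names are hypothetical):
#
#     bp = BackPropagation(model=model)
#     probs, ids = bp.forward(batch)  # classes sorted by descending confidence
#     bp.backward(ids=ids[:, [0]])    # backpropagate only the top-1 class
#     saliency = bp.generate()        # d(logit)/d(pixel), same shape as batch
#     bp.remove_hook()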


class GuidedBackPropagation(BackPropagation):
    """
    "Striving for Simplicity: the All Convolutional Net"
    https://arxiv.org/pdf/1412.6806.pdf
    Look at Figure 1 on page 8.
    """

    def __init__(self, model):
        super(GuidedBackPropagation, self).__init__(model)

        def backward_hook(module, grad_in, grad_out):
            # Cut off negative gradients
            if isinstance(module, nn.ReLU):
                return (F.relu(grad_in[0]),)

        for module in self.model.named_modules():
            self.handlers.append(module[1].register_backward_hook(backward_hook))


class Deconvnet(BackPropagation):
    """
    "Striving for Simplicity: the All Convolutional Net"
    https://arxiv.org/pdf/1412.6806.pdf
    Look at Figure 1 on page 8.
    """

    def __init__(self, model):
        super(Deconvnet, self).__init__(model)

        def backward_hook(module, grad_in, grad_out):
            # Cut off negative gradients and ignore ReLU
            if isinstance(module, nn.ReLU):
                return (F.relu(grad_out[0]),)

        for module in self.model.named_modules():
            self.handlers.append(module[1].register_backward_hook(backward_hook))
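

# Note the single-token difference between the two hooks above: guided
# backpropagation clips the incoming gradient (grad_in), which has already
# been gated by where the ReLU fired on the forward pass, while the deconvnet
# clips the outgoing gradient (grad_out) and so ignores the forward
# activations entirely.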


class GradCAM(_BaseWrapper):
    """
    "Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization"
    https://arxiv.org/pdf/1610.02391.pdf
    Look at Figure 2 on page 4
    """

    def __init__(self, model, candidate_layers=None):
        super(GradCAM, self).__init__(model)
        self.fmap_pool = {}
        self.grad_pool = {}
        self.candidate_layers = candidate_layers  # list

        def save_fmaps(key):
            def forward_hook(module, input, output):
                self.fmap_pool[key] = output.detach()

            return forward_hook

        def save_grads(key):
            def backward_hook(module, grad_in, grad_out):
                self.grad_pool[key] = grad_out[0].detach()

            return backward_hook

        # If no candidate layers are specified, the hooks are registered on
        # every layer.
        for name, module in self.model.named_modules():
            if self.candidate_layers is None or name in self.candidate_layers:
                self.handlers.append(module.register_forward_hook(save_fmaps(name)))
                self.handlers.append(module.register_backward_hook(save_grads(name)))

    @staticmethod
    def _find(pool, target_layer):
        if target_layer in pool.keys():
            return pool[target_layer]
        raise ValueError("Invalid layer name: {}".format(target_layer))

    def generate(self, target_layer):
        fmaps = self._find(self.fmap_pool, target_layer)
        grads = self._find(self.grad_pool, target_layer)
        # Global-average-pool the gradients to get one weight per channel
        weights = F.adaptive_avg_pool2d(grads, 1)

        # Weighted sum of the feature maps followed by ReLU, then upsample
        # to the input resolution
        gcam = torch.mul(fmaps, weights).sum(dim=1, keepdim=True)
        gcam = F.relu(gcam)
        gcam = F.interpolate(
            gcam, self.image_shape, mode="bilinear", align_corners=False
        )

        # Normalize each map to [0, 1]
        B, C, H, W = gcam.shape
        gcam = gcam.view(B, -1)
        gcam -= gcam.min(dim=1, keepdim=True)[0]
        gcam /= gcam.max(dim=1, keepdim=True)[0]
        gcam = gcam.view(B, C, H, W)

        return gcam
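

# A minimal GradCAM sketch (assumed names: `model` is a trained CNN and
# "layer4" stands in for whatever name `model.named_modules()` reports for
# the last convolutional block):
#
#     gcam = GradCAM(model=model, candidate_layers=["layer4"])
#     probs, ids = gcam.forward(batch)
#     gcam.backward(ids=ids[:, [0]])
#     heatmaps = gcam.generate(target_layer="layer4")  # (B, 1, H, W) in [0, 1]
#     gcam.remove_hook()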


def occlusion_sensitivity(
    model, images, ids, mean=None, patch=35, stride=1, n_batches=128
):
    """
    "Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization"
    https://arxiv.org/pdf/1610.02391.pdf
    Look at Figure A5 on page 17

    Originally proposed in:
    "Visualizing and Understanding Convolutional Networks"
    https://arxiv.org/abs/1311.2901
    """
    torch.set_grad_enabled(False)
    model.eval()
    mean = mean if mean else 0
    patch_H, patch_W = patch if isinstance(patch, Sequence) else (patch, patch)
    pad_H, pad_W = patch_H // 2, patch_W // 2

    # Padded image
    images = F.pad(images, (pad_W, pad_W, pad_H, pad_H), value=mean)
    B, _, H, W = images.shape
    new_H = (H - patch_H) // stride + 1
    new_W = (W - patch_W) // stride + 1

    # Prepare sampling grids (record each patch origin *before* stepping so
    # the first anchor starts at 0, not at `stride`)
    anchors = []
    grid_h = 0
    while grid_h <= H - patch_H:
        grid_w = 0
        while grid_w <= W - patch_W:
            anchors.append((grid_h, grid_w))
            grid_w += stride
        grid_h += stride

    # Baseline score without occlusion
    baseline = model(images).detach().gather(1, ids)

    # Compute per-pixel logits
    scoremaps = []
    for i in tqdm(range(0, len(anchors), n_batches), leave=False):
        batch_images = []
        batch_ids = []
        for grid_h, grid_w in anchors[i : i + n_batches]:
            images_ = images.clone()
            images_[..., grid_h : grid_h + patch_H, grid_w : grid_w + patch_W] = mean
            batch_images.append(images_)
            batch_ids.append(ids)
        batch_images = torch.cat(batch_images, dim=0)
        batch_ids = torch.cat(batch_ids, dim=0)
        scores = model(batch_images).detach().gather(1, batch_ids)
        scoremaps += list(torch.split(scores, B))

    diffmaps = torch.cat(scoremaps, dim=1) - baseline
    diffmaps = diffmaps.view(B, new_H, new_W)

    return diffmaps
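

# Occlusion sensitivity usage sketch (hypothetical `model`, `batch`, and
# `ids`, where `ids` is a (B, 1) tensor of target class indices such as
# ids[:, [0]] from a BackPropagation forward pass). Larger strides trade
# map resolution for far fewer forward passes:
#
#     diffmaps = occlusion_sensitivity(model, batch, ids, patch=35, stride=8)
#     # diffmaps[b, y, x] < 0 means occluding that patch lowered the target
#     # score, i.e. the region is evidence for the predicted class.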


def get_device(cuda):
    cuda = cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")
    if cuda:
        current_device = torch.cuda.current_device()
        print("Device:", torch.cuda.get_device_name(current_device))
    else:
        print("Device: CPU")
    return device


def load_images(image_paths, input_size):
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path, input_size)
        images.append(image)
        raw_images.append(raw_image)
    return images, raw_images


def preprocess(image_path, input_size):
    # raw_image = cv2.imread(image_path)
    raw_image = Image.open(image_path)
    raw_transforms = transforms.Compose(
        [transforms.Resize(input_size), transforms.CenterCrop(input_size)]
    )
    raw_image = raw_transforms(raw_image)

    model_transforms = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    image = model_transforms(raw_image.copy())

    return image, np.array(raw_image)
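

# preprocess() usage sketch (the file name is hypothetical):
#
#     image, raw_image = preprocess("slide.png", input_size=224)
#     # image: normalized (3, 224, 224) float tensor ready for the model
#     # raw_image: uint8 HxWx3 array kept for overlaying heatmaps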


def save_gradient(filename, gradient):
    gradient = gradient.cpu().numpy().transpose(1, 2, 0)
    gradient -= gradient.min()
    gradient /= gradient.max()
    gradient *= 255.0
    cv2.imwrite(filename, np.uint8(gradient))


def save_gradcam(filename, gcam, raw_image, paper_cmap=False):
    gcam = gcam.cpu().numpy()
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    if paper_cmap:
        alpha = gcam[..., None]
        gcam = alpha * cmap + (1 - alpha) * raw_image
    else:
        # np.float was removed in NumPy 1.24; use the builtin float instead
        gcam = (cmap.astype(float) + raw_image.astype(float)) / 2
    cv2.imwrite(filename, np.uint8(gcam))


def save_sensitivity(filename, maps):
    maps = maps.cpu().numpy()
    scale = max(maps[maps > 0].max(), -maps[maps <= 0].min())
    maps = maps / scale * 0.5
    maps += 0.5
    maps = cm.bwr_r(maps)[..., :3]
    maps = np.uint8(maps * 255.0)
    maps = cv2.resize(maps, (224, 224), interpolation=cv2.INTER_NEAREST)
    cv2.imwrite(filename, maps)


def main(args):
    """
    Visualize model responses given multiple images
    """
    device = get_device(not args.cpu)

    from slide_classifier_pytorch import SlideClassifier

    model = SlideClassifier.load_from_checkpoint(args.model)
    classes = model.hparams.classes
    model.to(device)
    model.eval()

    # Images
    images, raw_images = load_images(args.image_paths, model.hparams.input_size)
    images = torch.stack(images).to(device)

    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """

    # =========================================================================
    print("Vanilla Backpropagation:")

    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # sorted

    for i in range(args.topk):
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

        # Save results as image files
        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            save_gradient(
                filename=os.path.join(
                    args.output_dir,
                    "{}-{}-vanilla-{}.png".format(
                        j, model.hparams.arch, classes[ids[j, i]]
                    ),
                ),
                gradient=gradients[j],
            )

    # Remove all the hook functions from the "model"
    bp.remove_hook()

    # =========================================================================
    print("Deconvolution:")

    deconv = Deconvnet(model=model)
    _ = deconv.forward(images)

    for i in range(args.topk):
        deconv.backward(ids=ids[:, [i]])
        gradients = deconv.generate()

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            save_gradient(
                filename=os.path.join(
                    args.output_dir,
                    "{}-{}-deconvnet-{}.png".format(
                        j, model.hparams.arch, classes[ids[j, i]]
                    ),
                ),
                gradient=gradients[j],
            )

    deconv.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(args.topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=args.target_layer)

        for j in range(len(images)):
            print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

            # Guided Backpropagation
            save_gradient(
                filename=os.path.join(
                    args.output_dir,
                    "{}-{}-guided-{}.png".format(
                        j, model.hparams.arch, classes[ids[j, i]]
                    ),
                ),
                gradient=gradients[j],
            )

            # Grad-CAM
            save_gradcam(
                filename=os.path.join(
                    args.output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(
                        j, model.hparams.arch, args.target_layer, classes[ids[j, i]]
                    ),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )

            # Guided Grad-CAM
            save_gradient(
                filename=os.path.join(
                    args.output_dir,
                    "{}-{}-guided_gradcam-{}-{}.png".format(
                        j, model.hparams.arch, args.target_layer, classes[ids[j, i]]
                    ),
                ),
                gradient=torch.mul(regions, gradients)[j],
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Grad CAM")
    parser.add_argument(
        "image_paths",
        type=str,
        action="append",
        nargs="*",
        help="Paths to images to process.",
    )
    parser.add_argument(
        "--model", type=str, help="Path to model checkpoint file.", required=True
    )
    parser.add_argument(
        "--target_layer",
        type=str,
        required=True,
        help="The layer to create the heatmap from.",
    )
    parser.add_argument(
        "--topk",
        type=int,
        default=3,
        help="""The top N predictions to save. The default is 3, which means each
        input image will be classified and the heatmaps for the top 3 classes
        will be saved to disk.""",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="./grad_cam_results",
        help="Where to save processed images.",
    )
    parser.add_argument(
        "--cpu",
        action="store_true",
        help="Run models on the CPU instead of the GPU through CUDA.",
    )
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    # `action="append"` with `nargs="*"` wraps the positional list once
    args.image_paths = args.image_paths[0]

    main(args)
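
# Example invocation (hypothetical paths and layer name):
#
#     python grad_cam.py slide1.png slide2.png --model checkpoint.ckpt \
#         --target_layer layer4 --topk 3 --output_dir ./grad_cam_results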