import gradio as gr
import numpy as np
import torch
from PIL import Image
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
from torchvision import transforms

from custom_resnet import Net

# Load the trained weights on CPU and switch to eval mode so that
# batch-norm statistics stay frozen during inference.
model = Net('batch')
model.load_state_dict(torch.load("model.pth", map_location=torch.device('cpu')), strict=False)
model.eval()

classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')


def inference(input_img_files, transparency=0.5, target_layer_number=-1):
    confidences_list = []
    visualizations_list = []

    for input_img_file in input_img_files:
        # gr.File may pass a plain filepath or a tempfile-like wrapper,
        # depending on the Gradio version; handle both.
        path = input_img_file if isinstance(input_img_file, str) else input_img_file.name
        org_img = Image.open(path).convert("RGB")

        # [1, 3, H, W] float tensor in the 0-1 range.
        input_tensor = transforms.ToTensor()(org_img).unsqueeze(0)

        # Per-class confidences from a softmax over the 10 CIFAR10 logits.
        outputs = model(input_tensor)
        probs = torch.softmax(outputs.flatten(), dim=0)
        confidences = {classes[i]: float(probs[i]) for i in range(10)}
        confidences_list.append(confidences)

        # Grad-CAM on the requested block of layer2; targets=None uses the
        # highest-scoring class as the target.
        target_layers = [model.layer2[target_layer_number]]
        cam = GradCAM(model=model, target_layers=target_layers)
        grayscale_cam = cam(input_tensor=input_tensor, targets=None)[0, :]

        # Overlay the CAM on the original image (float RGB array in 0-1).
        rgb_img = np.asarray(org_img, dtype=np.float32) / 255.0
        visualization = show_cam_on_image(rgb_img, grayscale_cam,
                                          use_rgb=True, image_weight=transparency)
        visualizations_list.append(visualization)

    return confidences_list, visualizations_list


title = "CIFAR10 trained on ResNet18 Model with GradCAM"
description = "A simple Gradio interface to run inference on the ResNet model and view the GradCAM results"
examples = [[["cat.png"], 0.5, -1], [["dog.png"], 0.5, -1]]

demo = gr.Interface(
    inference,
    inputs=[
        gr.File(file_count="multiple", label="Input images"),
        gr.Slider(0, 1, value=0.5, label="Opacity of GradCAM"),
        gr.Slider(-2, -1, value=-2, step=1, label="Which Layer?"),
    ],
    # The function returns one confidence dict and one CAM overlay per
    # uploaded image, so both outputs accept lists.
    outputs=[
        gr.JSON(label="Class confidences"),
        gr.Gallery(label="GradCAM Output"),
    ],
    title=title,
    description=description,
    examples=examples,
)
demo.launch()