import gradio as gr
from transformers import pipeline

# Image-classification pipeline: a ViT-Base/16 checkpoint fine-tuned on Food-101
pipe = pipeline(
    "image-classification",
    model="ariG23498/vit_base_patch16_224.augreg2_in21k_ft_in1k.ft_food101",
)


def classify(image):
    """Classify a PIL image and return a {label: confidence} dict for gr.Label."""
    results = pipe(image)
    return {result["label"]: round(result["score"], 2) for result in results}
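

# A quick local sanity check (a sketch, not wired into the app): it assumes the
# ./sushi.png referenced by the examples below exists in the working directory.
# Uncomment to try the classifier outside the Gradio UI:
# from PIL import Image
# print(classify(Image.open("./sushi.png")))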

demo = gr.Interface(
    fn=classify,
    inputs=gr.Image(type="pil", label="Upload an Image"),
    outputs=gr.Label(num_top_classes=3, label="Top Predictions"),
    examples=[["./sushi.png"]],
title="Food Classification with ViT πŸ₯—πŸ£",
description=(
"# Explore Food Classification with Vision Transformers (ViT) πŸ”\n\n"
"This application demonstrates the power of Vision Transformers (ViT) for food classification tasks, "
"leveraging the pre-trained model `vit_base_patch16_224.augreg2_in21k_ft_in1k.ft_food101` fine-tuned on the Food-101 dataset. "
"With just a few lines of code, you can integrate state-of-the-art image classification models using the Hugging Face `pipeline` API.\n\n"
"## How to Use:\n"
"1. Upload an image of food (e.g., sushi, pizza, or burgers).\n"
"2. The model will classify the image and provide the predicted labels along with confidence scores.\n"
"3. Try the provided example for a quick start or test your own food images!\n\n"
)
)
demo.launch()
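
# Programmatic access sketch (assumes this file runs as a hosted Gradio Space; the
# Space id below is a placeholder, not the real one). With gradio_client installed:
# from gradio_client import Client, handle_file
# client = Client("user-name/space-name")  # hypothetical Space id
# print(client.predict(handle_file("./sushi.png"), api_name="/predict"))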