# app.py — Gradio demo serving the "soufyane/gemma_data_science" text-generation model.
from transformers import pipeline
# Load the model
# Hugging Face Hub model id for the fine-tuned Gemma data-science model.
model_name = "soufyane/gemma_data_science"
# Build the text-generation pipeline once at import time so the model is
# downloaded/loaded a single time and reused for every request.
text_generator = pipeline("text-generation", model=model_name)
def process_text_gemma(input_text: str, max_length: int = 256) -> str:
    """Generate an answer for *input_text* with the Gemma pipeline.

    Parameters
    ----------
    input_text : str
        The user's question; it is wrapped in a ``question: `` prompt
        before being sent to the model.
    max_length : int, optional
        Maximum total token length of the generated sequence
        (default 256, matching the original hard-coded value).

    Returns
    -------
    str
        The pipeline's ``generated_text`` for the first (and only)
        returned candidate. NOTE(review): with this pipeline the
        generated text typically includes the prompt itself — confirm
        whether callers expect the prompt stripped.
    """
    # Generate text using the model; take the first candidate's text.
    response = text_generator(f"question: {input_text}", max_length=max_length)[0]['generated_text']
    return response
def main(input_text):
    """Gradio callback: forward the textbox value to the model.

    Gradio passes the single ``"text"`` input as a plain string, so it
    must be forwarded whole — the original ``input_text[0]`` indexed the
    string and sent only the FIRST CHARACTER of the question to the model.
    """
    return process_text_gemma(input_text)
import gradio as gr

# Build the web UI and start serving. ``live=True`` re-runs the model as
# the user types instead of waiting for an explicit submit.
demo = gr.Interface(
    fn=main,
    inputs=["text"],
    outputs=["text"],
    title="Gemma Data Science Model",
    description="This is a text-to-text model for data science tasks.",
    live=True,
)
demo.launch()