import streamlit as st
import os
from transformers import pipeline
import requests
from deep_translator import GoogleTranslator
import io
from PIL import Image
import time
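
# Hugging Face API token, read from the "hugkey" environment variable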
API_TOKEN = os.getenv("hugkey")

st.header("Multi-model project")
st.write("This app translates a sentence from a regional language into English, then generates text and an image related to it")
text = st.text_input("Enter text", placeholder="Type a sentence in your language")

#tran_API_URL = "https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-mul-en"
#tran_API_URL = "https://api-inference.huggingface.co/models/google-t5/t5-small"
#tran_API_URL = "https://api-inference.huggingface.co/models/facebook/m2m100_418M"
#generate_API_URL = "https://api-inference.huggingface.co/models/openai-community/gpt2"
generate_API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-1B"
image_API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large"
headers = {"Authorization": f"Bearer {API_TOKEN}"}
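
# Translate the input to English locally with deep-translator's GoogleTranslator (the source language is auto-detected)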
def Translate_query(input_text):
    translator = GoogleTranslator(target='en')
    translation = translator.translate(input_text)
    return translation
    # payload = {"inputs": f"{input_text}", "parameters": {"forced_bos_token_id": 128000}}
    # response = requests.post(tran_API_URL, headers=headers, json=payload)
    # return response.json()
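
# Send the translated text to the hosted text-generation model through the Inference API and return the JSON response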
def Generate_query(payload):
    response = requests.post(generate_API_URL, headers=headers, json=payload)
    return response.json()
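
# Request an image from the hosted Stable Diffusion model; the raw response is returned so the caller can check the status code and read the image bytes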
def Image_query(payload):
    response = requests.post(image_API_URL, headers=headers, json=payload)
    return response
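
# Yield the generated text word by word so Streamlit can render it with a typewriter effect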
def stream_data(text_data):
    for word in text_data.split(" "):
        yield word + " "
        time.sleep(0.05)
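
# Run the full pipeline when the user clicks Generate: translate, then generate text and an image side by side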
if st.button("Generate"):
    col1, col2 = st.columns(2)
    with col1:
        translated_output = Translate_query(text)
        #translated_output = translated_output[0]["translation_text"]
        st.subheader("Text Translation")
        st.success(translated_output)
        st.subheader("Text Generation")
        generated_output = Generate_query({"inputs": translated_output})
        generated_output = generated_output[0]['generated_text']
        st.write_stream(stream_data(generated_output))
    with col2:
        st.subheader("Image Generation")
        image_response = Image_query({"inputs": translated_output})
        if image_response.status_code == 200:
            image_bytes = image_response.content  # Get the binary content
            try:
                image = Image.open(io.BytesIO(image_bytes))  # Open the image using PIL
                # Display the image
                st.image(image)
            except Exception as e:
                st.error(f"Error opening image: {e}")
        else:
            st.error(f"Failed to retrieve image: {image_response.status_code} {image_response.text}")