Spaces:
Runtime error
Runtime error
hemanth678599
committed on
Commit
•
50867f7
1
Parent(s):
5338fad
Update app.py
Browse files
app.py
CHANGED
@@ -15,6 +15,11 @@ warnings.filterwarnings("ignore", category=FutureWarning)
|
|
15 |
|
16 |
# Get the Hugging Face token from environment variables
|
17 |
hf_token = os.getenv("hemanth")
|
|
|
|
|
|
|
|
|
|
|
18 |
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
|
19 |
headers = {"Authorization": f"Bearer {hf_token}"}
|
20 |
|
@@ -26,9 +31,10 @@ tokenizer = MarianTokenizer.from_pretrained(model_name)
|
|
26 |
# Function to query the image generation API
|
27 |
def query_image_api(prompt):
|
28 |
try:
|
29 |
-
logging.info("Querying image generation API
|
30 |
response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
|
31 |
response.raise_for_status() # Raise an error for bad status codes
|
|
|
32 |
return response.content
|
33 |
except requests.exceptions.RequestException as e:
|
34 |
logging.error(f"Error querying image generation API: {e}")
|
@@ -53,7 +59,8 @@ def generate_image(translated_text):
|
|
53 |
if image_bytes:
|
54 |
return Image.open(io.BytesIO(image_bytes))
|
55 |
else:
|
56 |
-
|
|
|
57 |
|
58 |
# Load GPT-Neo model for creative text generation
|
59 |
text_gen_model = pipeline("text-generation", model="EleutherAI/gpt-neo-1.3B")
|
@@ -74,6 +81,8 @@ def process_input(tamil_input):
|
|
74 |
translated_output = translate_text(tamil_input)
|
75 |
creative_text = generate_creative_text(translated_output)
|
76 |
image = generate_image(translated_output)
|
|
|
|
|
77 |
return translated_output, creative_text, image
|
78 |
else:
|
79 |
return "No input provided.", None, None
|
@@ -94,4 +103,4 @@ iface = gr.Interface(
|
|
94 |
)
|
95 |
|
96 |
# Launch the Gradio interface
|
97 |
-
iface.launch(share=False)
|
|
|
15 |
|
16 |
# Get the Hugging Face token from environment variables
|
17 |
hf_token = os.getenv("hemanth")
|
18 |
+
if not hf_token:
|
19 |
+
raise ValueError("Hugging Face token is not set correctly.")
|
20 |
+
else:
|
21 |
+
logging.info("Hugging Face token loaded successfully.")
|
22 |
+
|
23 |
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
|
24 |
headers = {"Authorization": f"Bearer {hf_token}"}
|
25 |
|
|
|
31 |
# Function to query the image generation API
|
32 |
def query_image_api(prompt):
|
33 |
try:
|
34 |
+
logging.info(f"Querying image generation API with prompt: {prompt}")
|
35 |
response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
|
36 |
response.raise_for_status() # Raise an error for bad status codes
|
37 |
+
logging.info(f"Received response with status code: {response.status_code}")
|
38 |
return response.content
|
39 |
except requests.exceptions.RequestException as e:
|
40 |
logging.error(f"Error querying image generation API: {e}")
|
|
|
59 |
if image_bytes:
|
60 |
return Image.open(io.BytesIO(image_bytes))
|
61 |
else:
|
62 |
+
logging.error("Failed to generate image.")
|
63 |
+
return None # Return None if image generation failed
|
64 |
|
65 |
# Load GPT-Neo model for creative text generation
|
66 |
text_gen_model = pipeline("text-generation", model="EleutherAI/gpt-neo-1.3B")
|
|
|
81 |
translated_output = translate_text(tamil_input)
|
82 |
creative_text = generate_creative_text(translated_output)
|
83 |
image = generate_image(translated_output)
|
84 |
+
if image is None:
|
85 |
+
image = "Image generation failed."
|
86 |
return translated_output, creative_text, image
|
87 |
else:
|
88 |
return "No input provided.", None, None
|
|
|
103 |
)
|
104 |
|
105 |
# Launch the Gradio interface
|
106 |
+
iface.launch(share=False)
|