Update app.py
Conditionals on examples
app.py
CHANGED
@@ -290,8 +290,43 @@ We anecdotally determined that when trained at lower steps the encoded hand mode
     with gr.Column():
         output_image = gr.Gallery(label='Output Image', show_label=False, elem_id="gallery").style(grid=2, height='auto')

-
-
+    if model_type=="Standard":
+        gr.Examples(
+            examples=[
+                [
+                    "a woman is making an ok sign in front of a painting",
+                    "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
+                    "example.png"
+                ],
+                [
+                    "a man with his hands up in the air making a rock sign",
+                    "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
+                    "example1.png"
+                ],
+                [
+                    "a man is making a thumbs up gesture",
+                    "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
+                    "example2.png"
+                ],
+                [
+                    "a woman is holding up her hand in front of a window",
+                    "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
+                    "example3.png"
+                ],
+                [
+                    "a man with his finger on his lips",
+                    "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
+                    "example4.png"
+                ],
+            ],
+            inputs=[prompt_input, negative_prompt, input_image, model_type],
+            outputs=[output_image],
+            fn=infer,
+            cache_examples=True,
+        )
+    elif model_type=="Hand Encoding":
+        gr.Examples(
+            examples=[
                 [
                     "a woman is making an ok sign in front of a painting",
                     "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
@@ -323,7 +358,7 @@ We anecdotally determined that when trained at lower steps the encoded hand mode
             fn=infer,
             cache_examples=False, #cache_examples=True,
         )
-
+
     inputs = [prompt_input, negative_prompt, input_image, model_type]
     submit_btn.click(fn=infer, inputs=inputs, outputs=[output_image])
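For reference, below is a minimal, self-contained sketch of how a gr.Examples block with the same inputs/outputs/fn wiring as the committed code can be registered in a Gradio Blocks app. It is not the Space's actual app.py: the component names (prompt_input, negative_prompt, input_image, model_type, output_image, submit_btn) follow the diff, but the Radio choices, the stub infer function, and the single example row are assumptions made for illustration.

# Minimal sketch, assuming the component names from the diff; not the Space's real code.
import gradio as gr

def infer(prompt, negative_prompt, image, model_type):
    # Stub inference function: the real Space runs its hand-conditioned pipeline here.
    return [image]

with gr.Blocks() as demo:
    model_type = gr.Radio(["Standard", "Hand Encoding"], value="Standard", label="Model type")
    prompt_input = gr.Textbox(label="Prompt")
    negative_prompt = gr.Textbox(label="Negative prompt")
    input_image = gr.Image(type="filepath", label="Input Image")
    submit_btn = gr.Button("Generate")
    with gr.Column():
        output_image = gr.Gallery(label="Output Image", show_label=False, elem_id="gallery")

    # Each example row supplies one value per component listed in `inputs`,
    # so a value for model_type is included alongside the prompts and image path.
    gr.Examples(
        examples=[
            [
                "a woman is making an ok sign in front of a painting",
                "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                "example.png",  # assumed to exist in the Space's repository
                "Standard",
            ],
        ],
        inputs=[prompt_input, negative_prompt, input_image, model_type],
        outputs=[output_image],
        fn=infer,
        cache_examples=False,  # True would pre-run infer on every row at startup
    )

    submit_btn.click(
        fn=infer,
        inputs=[prompt_input, negative_prompt, input_image, model_type],
        outputs=[output_image],
    )

if __name__ == "__main__":
    demo.launch()

One design note, hedged: the committed if/elif on model_type runs while the UI is being built, so if model_type is a Gradio component at that point rather than a plain string, only one branch's Examples block would ever be registered. The sketch above therefore registers a single Examples block unconditionally.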