# Hugging Face Space metadata (web-UI residue, kept as a comment so the file parses):
# Author: Alibrown — "Update app.py" — commit 8b69324 (verified) — 2.42 kB
import gradio as gr
from diffusers import DiffusionPipeline
import torch
# Pre-initialize the pipeline once at module level for better performance
# (avoids reloading the model on every request).
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev")
pipe.load_lora_weights("enhanceaiteam/Flux-uncensored")

# GPU optimization: enable_model_cpu_offload() manages device placement
# itself (it moves only the active sub-model to the GPU) and must NOT be
# combined with a prior pipe.to("cuda") — the original did both, which
# defeats the memory savings and can raise in recent diffusers versions.
if torch.cuda.is_available():
    pipe.enable_model_cpu_offload()
def generate_image(
    prompt,
    width=512,
    height=512,
    num_inference_steps=50,
    guidance_scale=7.5,
):
    """Generate an image from a text prompt using the module-global FLUX pipeline.

    Args:
        prompt: Text description of the desired image.
        width: Output image width in pixels (default 512).
        height: Output image height in pixels (default 512).
        num_inference_steps: Number of denoising steps (default 50).
        guidance_scale: Classifier-free guidance strength (default 7.5).

    Returns:
        The generated PIL image on success, or None if generation failed.
    """
    try:
        result = pipe(
            prompt=prompt,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
        )
        return result.images[0]
    except Exception as e:
        # Log and return None so the Gradio UI shows an empty output
        # instead of crashing the whole app on a single failed request.
        print(f"Fehler bei Bildgenerierung: {e}")
        return None
# Gradio interface with extended options.
def create_gradio_interface():
    """Build and return the Gradio Blocks UI for the image generator.

    Wires the prompt/size/steps/guidance controls to generate_image and
    adds a few example prompts. Returns the (unlaunched) Blocks object.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# Flux Bildgenerator")
        with gr.Row():
            prompt = gr.Textbox(label="Bildprompt", lines=2)
            width = gr.Slider(minimum=64, maximum=2048, value=512, label="Breite")
            height = gr.Slider(minimum=64, maximum=2048, value=512, label="Höhe")
        with gr.Row():
            steps = gr.Slider(minimum=10, maximum=100, value=50, label="Inference Steps")
            guidance = gr.Slider(minimum=1, maximum=15, value=7.5, label="Guidance Scale")
        generate_btn = gr.Button("Bild generieren", variant="primary")
        output_image = gr.Image(label="Generiertes Bild")
        generate_btn.click(
            fn=generate_image,
            inputs=[prompt, width, height, steps, guidance],
            outputs=output_image,
        )
        # Example prompts shown below the inputs.
        gr.Examples(
            examples=[
                ["Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"],
                ["Cyberpunk city landscape at night"],
                ["Realistic portrait of a wolf in mountain terrain"],
            ],
            inputs=[prompt],
        )
    return demo
# Main entry point.
def main():
    """Create the Gradio interface and launch the web server."""
    interface = create_gradio_interface()
    interface.launch(
        share=True,  # NOTE(review): exposes a public share URL — confirm this is intended
        debug=True,  # detailed error messages in the console
    )


if __name__ == "__main__":
    main()