import torch
from diffusers.utils import load_image, check_min_version
from controlnet_flux import FluxControlNetModel
from transformer_flux import FluxTransformer2DModel
from pipeline_flux_controlnet_inpaint import FluxControlNetInpaintingPipeline

check_min_version("0.30.2")

# Set image path, mask path and prompt
image_path = 'https://huggingface.co./alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Alpha/resolve/main/images/bucket.png'
mask_path = 'https://huggingface.co./alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Alpha/resolve/main/images/bucket_mask.jpeg'
prompt = 'a person wearing a white shoe, carrying a white bucket with text "FLUX" on it'

# Build pipeline
controlnet = FluxControlNetModel.from_pretrained(
    "alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Alpha", torch_dtype=torch.bfloat16
)
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder='transformer', torch_dtype=torch.bfloat16
)
pipe = FluxControlNetInpaintingPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    controlnet=controlnet,
    transformer=transformer,
    torch_dtype=torch.bfloat16
).to("cuda")
pipe.transformer.to(torch.bfloat16)
pipe.controlnet.to(torch.bfloat16)

# Load image and mask
size = (768, 768)
image = load_image(image_path).convert("RGB").resize(size)
mask = load_image(mask_path).convert("RGB").resize(size)
generator = torch.Generator(device="cuda").manual_seed(24)

# Inpaint
result = pipe(
    prompt=prompt,
    height=size[1],
    width=size[0],
    control_image=image,
    control_mask=mask,
    num_inference_steps=28,
    generator=generator,
    controlnet_conditioning_scale=0.9,
    guidance_scale=3.5,
    negative_prompt="",
    true_guidance_scale=3.5
).images[0]

result.save('flux_inpaint.png')
print("Successfully inpainted image")