ucaslx committed on
Commit 011d756 · 1 Parent(s): c569529
Files changed (1)
  1. app.py +2 -1
app.py CHANGED
@@ -5,6 +5,7 @@ import cv2
 import insightface
 import gradio as gr
 import numpy as np
+import os
 from huggingface_hub import snapshot_download
 from transformers import CLIPVisionModelWithProjection,CLIPImageProcessor
 from kolors.pipelines.pipeline_stable_diffusion_xl_chatglm_256_ipadapter_FaceID import StableDiffusionXLPipeline
@@ -30,7 +31,6 @@ unet = UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).h
 clip_image_encoder = CLIPVisionModelWithProjection.from_pretrained(f'{ckpt_dir_faceid}/clip-vit-large-patch14-336', ignore_mismatched_sizes=True)
 clip_image_encoder.to(device)
 clip_image_processor = CLIPImageProcessor(size = 336, crop_size = 336)
-print(os.environ['test'])
 
 pipe = StableDiffusionXLPipeline(
     vae = vae,
@@ -84,6 +84,7 @@ def infer(prompt,
           guidance_scale = 5.0,
           num_inference_steps = 50
           ):
+    print(os.environ['test'])
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
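In short, the commit adds "import os", drops the module-level print(os.environ['test']) debug line, and re-adds the same print inside infer, so the environment variable is now read on every inference call instead of once at import time. A minimal, self-contained sketch of that behavior follows; only the names infer, os, and the 'test' variable come from the diff, the rest is illustrative and not the Space's actual code:

    import os

    def infer(prompt):
        # After this commit the lookup happens per call, not at import time,
        # so a missing 'test' variable only fails when infer() is invoked.
        print(os.environ['test'])  # raises KeyError if 'test' is unset
        return prompt

    if __name__ == "__main__":
        os.environ.setdefault("test", "debug-value")  # illustrative default
        infer("a portrait photo")

As a design note, os.environ['test'] raises KeyError when the variable is absent, whereas os.environ.get('test') would return None and keep the call running; the commit keeps the stricter bracket form.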