zysong212 committed on
Commit
904cde3
·
1 Parent(s): 549390b

Reinitialize repository without history

Browse files
Files changed (1) hide show
  1. app.py +16 -3
app.py CHANGED
@@ -1,8 +1,15 @@
1
  import gradio as gr
 
2
 
3
  import torch
4
  from PIL import Image
 
 
 
 
 
5
 
 
6
  from depthmaster import DepthMasterPipeline
7
  from depthmaster.modules.unet_2d_condition import UNet2DConditionModel
8
 
@@ -21,9 +28,15 @@ torch_dtype = torch.float32
21
 
22
  # pipe = DepthMasterPipeline.from_pretrained('eval', torch_dtype=torch_dtype)
23
  # unet = UNet2DConditionModel.from_pretrained(os.path.join('eval', f'unet'))
24
- pipe = DepthMasterPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
25
- unet = UNet2DConditionModel.from_pretrained(model_repo_id, subfolder="unet", torch_dtype=torch_dtype)
26
- pipe.unet = unet
 
 
 
 
 
 
27
 
28
  try:
29
  pipe.enable_xformers_memory_efficient_attention()
 
1
  import gradio as gr
2
+ import os
3
 
4
  import torch
5
  from PIL import Image
6
+ from diffusers import (
7
+ AutoencoderKL,
8
+ DiffusionPipeline,
9
+ # UNet2DConditionModel,
10
+ )
11
 
12
+ from transformers import CLIPTextModel, CLIPTokenizer
13
  from depthmaster import DepthMasterPipeline
14
  from depthmaster.modules.unet_2d_condition import UNet2DConditionModel
15
 
 
28
 
29
  # pipe = DepthMasterPipeline.from_pretrained('eval', torch_dtype=torch_dtype)
30
  # unet = UNet2DConditionModel.from_pretrained(os.path.join('eval', f'unet'))
31
+ # pipe = DepthMasterPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
32
+ # unet = UNet2DConditionModel.from_pretrained(model_repo_id, subfolder="unet", torch_dtype=torch_dtype)
33
+ # pipe.unet = unet
34
+ vae = AutoencoderKL.from_pretrained(model_repo_id, subfolder="vae", torch_dtype=torch_dtype, allow_pickle=False)
35
+ unet = UNet2DConditionModel.from_pretrained(model_repo_id, subfolder="unet", torch_dtype=torch_dtype, allow_pickle=False)
36
+ text_encoder = CLIPTextModel.from_pretrained(model_repo_id, subfolder="text_encoder", torch_dtype=torch_dtype)
37
+ tokenizer = CLIPTokenizer.from_pretrained(model_repo_id, subfolder="tokenizer", torch_dtype=torch_dtype)
38
+ pipe = DepthMasterPipeline(vae=vae, unet=unet, text_encoder=text_encoder, tokenizer=tokenizer)
39
+
40
 
41
  try:
42
  pipe.enable_xformers_memory_efficient_attention()