benjamin-paine committed on
Commit
beab471
·
verified ·
1 Parent(s): 95f8c36

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -29
app.py CHANGED
@@ -7,44 +7,17 @@ import torch
7
  import spaces
8
 
9
  from diffusers.pipelines import Lumina2Text2ImgPipeline
10
- from diffusers.models.transformers.transformer_lumina2 import Lumina2Transformer2DModel
11
-
12
- from diffusers import (
13
- AutoencoderKL,
14
- FlowMatchEulerDiscreteScheduler
15
- )
16
- from diffusers.loaders.single_file_utils import (
17
- convert_sd3_transformer_checkpoint_to_diffusers,
18
- )
19
- from transformers import (
20
- Gemma2Model,
21
- GemmaTokenizer
22
- )
23
 
24
  default_system_prompt = "You are an assistant designed to generate superior images with the superior degree of image-text alignment based on textual prompts or user prompts."
25
  device = "cuda" if torch.cuda.is_available() else "cpu"
26
  model_repo_id = "Alpha-VLLM/Lumina-Image-2.0"
27
- transformer_repo_id = "benjamin-paine/Lumina-Image-2.0" # Temporarily fixed, change when main repo gets updated
28
  if torch.cuda.is_available():
29
  torch_dtype = torch.bfloat16
30
  else:
31
  torch_dtype = torch.float32
32
 
33
- ###
34
- transformer = Lumina2Transformer2DModel.from_pretrained(transformer_repo_id, subfolder="transformer")
35
- vae = AutoencoderKL.from_pretrained(model_repo_id, subfolder="vae")
36
- text_encoder = Gemma2Model.from_pretrained(model_repo_id, subfolder="text_encoder")
37
- tokenizer = GemmaTokenizer.from_pretrained(model_repo_id, subfolder="tokenizer")
38
- scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(model_repo_id, subfolder="scheduler")
39
-
40
- ###
41
- pipe = Lumina2Text2ImgPipeline(
42
- vae=vae,
43
- text_encoder=text_encoder,
44
- transformer=transformer,
45
- tokenizer=tokenizer,
46
- scheduler=scheduler,
47
- )
48
  pipe.to(device, torch_dtype)
49
 
50
  MAX_SEED = np.iinfo(np.int32).max
 
7
  import spaces
8
 
9
  from diffusers.pipelines import Lumina2Text2ImgPipeline
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
  default_system_prompt = "You are an assistant designed to generate superior images with the superior degree of image-text alignment based on textual prompts or user prompts."
12
  device = "cuda" if torch.cuda.is_available() else "cpu"
13
  model_repo_id = "Alpha-VLLM/Lumina-Image-2.0"
14
+
15
  if torch.cuda.is_available():
16
  torch_dtype = torch.bfloat16
17
  else:
18
  torch_dtype = torch.float32
19
 
20
+ pipe = Lumina2Text2ImgPipeline.from_pretrained(model_repo_id)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  pipe.to(device, torch_dtype)
22
 
23
  MAX_SEED = np.iinfo(np.int32).max