multimodalart (HF staff) committed
Commit b213a9c
Parent: bed2a9b

Update app.py

Files changed (1):
  1. app.py +18 -11
app.py CHANGED
@@ -9,14 +9,14 @@ from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
 dtype = torch.bfloat16
 device = "cuda"

-sd3_repo = "stabilityai/stable-diffusion-3-medium-diffusers"
-scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(sd3_repo, subfolder="scheduler")
+bfl_repo = "black-forest-labs/FLUX.1-schnell"
+scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(bfl_repo, subfolder="scheduler", revision="refs/pr/1")
 text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
 tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
-text_encoder_2 = T5EncoderModel.from_pretrained(sd3_repo, subfolder="text_encoder_3", torch_dtype=dtype)
-tokenizer_2 = T5TokenizerFast.from_pretrained(sd3_repo, subfolder="tokenizer_3", torch_dtype=dtype)
-vae = AutoencoderKL.from_pretrained("diffusers-internal-dev/FLUX.1-schnell", subfolder="vae", torch_dtype=dtype)
-transformer = FluxTransformer2DModel.from_pretrained("diffusers-internal-dev/FLUX.1-schnell", subfolder="transformer", torch_dtype=dtype)
+text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype, revision="refs/pr/1")
+tokenizer_2 = T5TokenizerFast.from_pretrained(bfl_repo, subfolder="tokenizer_2", torch_dtype=dtype, revision="refs/pr/1")
+vae = AutoencoderKL.from_pretrained(bfl_repo, subfolder="vae", torch_dtype=dtype, revision="refs/pr/1")
+transformer = FluxTransformer2DModel.from_pretrained("diffusers-internal-dev/FLUX.1-dev", subfolder="transformer", torch_dtype=dtype, revision="refs/pr/1")

 device = "cuda" if torch.cuda.is_available() else "cpu"

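The hunk above loads every pipeline component individually, but the assembly step itself sits outside the lines shown. A minimal sketch of how these parts are typically combined, assuming the Space follows the usual diffusers pattern of passing components to FluxPipeline as keyword arguments (the name pipe is an assumption; the infer function below calls it):

    from diffusers import FluxPipeline

    # Assumption: the Space wires the loaded components into a FluxPipeline;
    # this construction is not part of the diff shown.
    pipe = FluxPipeline(
        scheduler=scheduler,
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        text_encoder_2=text_encoder_2,
        tokenizer_2=tokenizer_2,
        vae=vae,
        transformer=transformer,
    ).to(device)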
@@ -34,7 +34,7 @@ MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048

 @spaces.GPU()
-def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
+def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=5.0, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
@@ -44,7 +44,7 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_in
         height = height,
         num_inference_steps = num_inference_steps,
         generator = generator,
-        guidance_scale=0.0
+        guidance_scale=guidance_scale
     ).images[0]
     return image, seed

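Pieced together from the two hunks above, the updated infer reads roughly as follows. The call target pipe and its prompt/width arguments fall outside the visible diff context, so they are assumptions here:

    @spaces.GPU()
    def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024,
              guidance_scale=5.0, num_inference_steps=28,
              progress=gr.Progress(track_tqdm=True)):
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)
        # Seeding the generator keeps results reproducible for a given seed.
        generator = torch.Generator().manual_seed(seed)
        image = pipe(
            prompt=prompt,    # assumed: outside the visible hunk context
            width=width,      # assumed: outside the visible hunk context
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=guidance_scale,  # previously hard-coded to 0.0
        ).images[0]
        return image, seed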
@@ -114,14 +114,21 @@ with gr.Blocks(css=css) as demo:
            )

        with gr.Row():
-
+
+            guidance_scale = gr.Slider(
+                label="Guidance Scale",
+                minimum=1,
+                maximum=15,
+                step=1,
+                value=5.0,
+            )

            num_inference_steps = gr.Slider(
                label="Number of inference steps",
                minimum=1,
                maximum=50,
                step=1,
-                value=4,
+                value=28,
            )

        gr.Examples(
@@ -135,7 +142,7 @@
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn = infer,
-        inputs = [prompt, seed, randomize_seed, width, height, num_inference_steps],
+        inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs = [result, seed]
    )

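Taken together, the commit moves the Space from FLUX.1-schnell's guidance-free 4-step setup to a FLUX.1-dev-style transformer with real guidance (default 5.0) and 28 steps, and exposes the new knob in the UI. A hypothetical smoke test of the updated defaults (the prompt and filename are made up):

    # Hypothetical check; keyword values mirror the new slider defaults.
    image, used_seed = infer(
        "a tiny astronaut hatching from an egg on the moon",
        randomize_seed=True,
        guidance_scale=5.0,
        num_inference_steps=28,
    )
    print(f"seed used: {used_seed}")
    image.save("flux_sample.png")  # infer returns a PIL image and the seed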