Skip to content

Commit 5b2a1d2

Browse files
committed
test(qwen): add tests for true cfg scale without neg prompt mask
1 parent f600a36 commit 5b2a1d2

6 files changed

Lines changed: 158 additions & 0 deletions

File tree

tests/pipelines/qwenimage/test_qwenimage.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -234,3 +234,29 @@ def test_vae_tiling(self, expected_diff_max: float = 0.2):
234234
expected_diff_max,
235235
"VAE tiling should not affect the inference results",
236236
)
237+
238+
def test_true_cfg_without_negative_prompt_embeds_mask(self):
    """True CFG must run when the negative prompt-embeds mask is omitted.

    Encodes the prompt once, reuses the embeddings as the negative
    embeddings with ``negative_prompt_embeds_mask=None``, and checks the
    pipeline still produces an image with ``true_cfg_scale > 1``.
    """
    pipe = self.pipeline_class(**self.get_dummy_components())
    pipe.to("cpu")
    pipe.set_progress_bar_config(disable=None)

    inputs = self.get_dummy_inputs("cpu")
    prompt = inputs.pop("prompt")

    prompt_embeds, prompt_embeds_mask = pipe.encode_prompt(
        prompt=prompt,
        device="cpu",
        num_images_per_prompt=1,
        max_sequence_length=inputs.get("max_sequence_length", 16),
    )

    # Reuse positive embeddings as negatives, deliberately leaving the
    # negative mask unset to exercise that code path.
    inputs.update(
        {
            "prompt_embeds": prompt_embeds,
            "prompt_embeds_mask": prompt_embeds_mask,
            "negative_prompt_embeds": prompt_embeds,
            "negative_prompt": None,
            "negative_prompt_embeds_mask": None,
            "true_cfg_scale": 2.0,
        }
    )

    image = pipe(**inputs).images
    self.assertIsNotNone(image)

tests/pipelines/qwenimage/test_qwenimage_controlnet.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -336,3 +336,29 @@ def test_vae_tiling(self, expected_diff_max: float = 0.2):
336336
expected_diff_max,
337337
"VAE tiling should not affect the inference results",
338338
)
339+
340+
def test_true_cfg_without_negative_prompt_embeds_mask(self):
    """True CFG must run when the negative prompt-embeds mask is omitted.

    Positive embeddings are computed once and fed back as the negative
    embeddings while ``negative_prompt_embeds_mask`` stays ``None``; the
    controlnet pipeline should still return a non-None image.
    """
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    pipe.to("cpu")
    pipe.set_progress_bar_config(disable=None)

    inputs = self.get_dummy_inputs("cpu")
    prompt = inputs.pop("prompt")

    max_len = inputs.get("max_sequence_length", 16)
    prompt_embeds, prompt_embeds_mask = pipe.encode_prompt(
        prompt=prompt,
        device="cpu",
        num_images_per_prompt=1,
        max_sequence_length=max_len,
    )

    # Deliberately leave the negative mask unset to cover that branch.
    inputs["prompt_embeds"] = prompt_embeds
    inputs["prompt_embeds_mask"] = prompt_embeds_mask
    inputs["negative_prompt_embeds"] = prompt_embeds
    inputs["negative_prompt"] = None
    inputs["negative_prompt_embeds_mask"] = None
    inputs["true_cfg_scale"] = 2.0

    output = pipe(**inputs)
    self.assertIsNotNone(output.images)

tests/pipelines/qwenimage/test_qwenimage_edit.py

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -241,3 +241,30 @@ def test_vae_tiling(self, expected_diff_max: float = 0.2):
241241
@pytest.mark.xfail(condition=True, reason="Preconfigured embeddings need to be revisited.", strict=True)
242242
def test_encode_prompt_works_in_isolation(self, extra_required_param_value_dict=None, atol=1e-4, rtol=1e-4):
243243
super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict, atol, rtol)
244+
245+
def test_true_cfg_without_negative_prompt_embeds_mask(self):
    """True CFG must run when the negative prompt-embeds mask is omitted.

    Encodes the prompt (with the edit image, if present) once, reuses the
    embeddings as the negative embeddings with
    ``negative_prompt_embeds_mask=None``, and checks the edit pipeline
    still produces an image with ``true_cfg_scale > 1``.
    """
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    pipe.to("cpu")
    pipe.set_progress_bar_config(disable=None)

    inputs = self.get_dummy_inputs("cpu")
    prompt = inputs.pop("prompt")

    prompt_embeds, prompt_embeds_mask = pipe.encode_prompt(
        prompt=prompt,
        # dict.get already returns None for a missing key; the
        # `if "image" in inputs else None` guard was redundant.
        image=inputs.get("image"),
        device="cpu",
        num_images_per_prompt=1,
        max_sequence_length=inputs.get("max_sequence_length", 16),
    )

    # Reuse positive embeddings as negatives, deliberately leaving the
    # negative mask unset to exercise that code path.
    inputs["prompt_embeds"] = prompt_embeds
    inputs["prompt_embeds_mask"] = prompt_embeds_mask
    inputs["negative_prompt_embeds"] = prompt_embeds
    inputs["negative_prompt"] = None
    inputs["negative_prompt_embeds_mask"] = None
    inputs["true_cfg_scale"] = 2.0

    image = pipe(**inputs).images
    self.assertIsNotNone(image)

tests/pipelines/qwenimage/test_qwenimage_edit_plus.py

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -251,3 +251,30 @@ def test_inference_batch_consistent():
251251
@pytest.mark.xfail(condition=True, reason="Batch of multiple images needs to be revisited", strict=True)
252252
def test_inference_batch_single_identical():
253253
super().test_inference_batch_single_identical()
254+
255+
def test_true_cfg_without_negative_prompt_embeds_mask(self):
    """True CFG must run when the negative prompt-embeds mask is omitted.

    Encodes the prompt (with the edit image, if present) once, reuses the
    embeddings as the negative embeddings with
    ``negative_prompt_embeds_mask=None``, and checks the edit-plus
    pipeline still produces an image with ``true_cfg_scale > 1``.
    """
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    pipe.to("cpu")
    pipe.set_progress_bar_config(disable=None)

    inputs = self.get_dummy_inputs("cpu")
    prompt = inputs.pop("prompt")

    prompt_embeds, prompt_embeds_mask = pipe.encode_prompt(
        prompt=prompt,
        # dict.get already returns None for a missing key; the
        # `if "image" in inputs else None` guard was redundant.
        image=inputs.get("image"),
        device="cpu",
        num_images_per_prompt=1,
        max_sequence_length=inputs.get("max_sequence_length", 16),
    )

    # Reuse positive embeddings as negatives, deliberately leaving the
    # negative mask unset to exercise that code path.
    inputs["prompt_embeds"] = prompt_embeds
    inputs["prompt_embeds_mask"] = prompt_embeds_mask
    inputs["negative_prompt_embeds"] = prompt_embeds
    inputs["negative_prompt"] = None
    inputs["negative_prompt_embeds_mask"] = None
    inputs["true_cfg_scale"] = 2.0

    image = pipe(**inputs).images
    self.assertIsNotNone(image)

tests/pipelines/qwenimage/test_qwenimage_img2img.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -216,3 +216,29 @@ def test_vae_tiling(self, expected_diff_max: float = 0.2):
216216
expected_diff_max,
217217
"VAE tiling should not affect the inference results",
218218
)
219+
220+
def test_true_cfg_without_negative_prompt_embeds_mask(self):
    """True CFG must run when the negative prompt-embeds mask is omitted.

    The img2img pipeline is driven with precomputed embeddings, reusing
    the positive embeddings as negatives while the negative mask stays
    ``None``; it should still return a non-None image.
    """
    pipe = self.pipeline_class(**self.get_dummy_components())
    pipe.to("cpu")
    pipe.set_progress_bar_config(disable=None)

    inputs = self.get_dummy_inputs("cpu")
    prompt = inputs.pop("prompt")

    seq_len = inputs.get("max_sequence_length", 16)
    prompt_embeds, prompt_embeds_mask = pipe.encode_prompt(
        prompt=prompt,
        device="cpu",
        num_images_per_prompt=1,
        max_sequence_length=seq_len,
    )

    # Deliberately leave the negative mask unset to cover that branch.
    inputs.update(
        {
            "prompt_embeds": prompt_embeds,
            "prompt_embeds_mask": prompt_embeds_mask,
            "negative_prompt_embeds": prompt_embeds,
            "negative_prompt": None,
            "negative_prompt_embeds_mask": None,
            "true_cfg_scale": 2.0,
        }
    )

    image = pipe(**inputs).images
    self.assertIsNotNone(image)

tests/pipelines/qwenimage/test_qwenimage_inpaint.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -231,3 +231,29 @@ def test_vae_tiling(self, expected_diff_max: float = 0.2):
231231
expected_diff_max,
232232
"VAE tiling should not affect the inference results",
233233
)
234+
235+
def test_true_cfg_without_negative_prompt_embeds_mask(self):
    """True CFG must run when the negative prompt-embeds mask is omitted.

    The inpaint pipeline is driven with precomputed embeddings, reusing
    the positive embeddings as negatives while the negative mask stays
    ``None``; it should still return a non-None image.
    """
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    pipe.to("cpu")
    pipe.set_progress_bar_config(disable=None)

    inputs = self.get_dummy_inputs("cpu")
    prompt = inputs.pop("prompt")

    prompt_embeds, prompt_embeds_mask = pipe.encode_prompt(
        prompt=prompt,
        device="cpu",
        num_images_per_prompt=1,
        max_sequence_length=inputs.get("max_sequence_length", 16),
    )

    # Deliberately leave the negative mask unset to cover that branch.
    inputs["prompt_embeds"] = prompt_embeds
    inputs["prompt_embeds_mask"] = prompt_embeds_mask
    inputs["negative_prompt_embeds"] = prompt_embeds
    inputs["negative_prompt"] = None
    inputs["negative_prompt_embeds_mask"] = None
    inputs["true_cfg_scale"] = 2.0

    output = pipe(**inputs)
    self.assertIsNotNone(output.images)

0 commit comments

Comments
 (0)