Skip to content
Prev Previous commit
Next Next commit
Update tests
  • Loading branch information
anton-l committed Aug 16, 2022
commit 1be007191eb4c10b1cdaaa89a5237b605d175ab1
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,10 @@ def __call__(
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, i, latents, **extra_step_kwargs)["prev_sample"]
if isinstance(self.scheduler, LmsDiscreteScheduler):
latents = self.scheduler.step(noise_pred, i, latents, **extra_step_kwargs)["prev_sample"]
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

shouldn't we maybe pass the sigma here?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ok we need it anyway - good to leave as is for me!

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The index `i` is needed inside `step` for other calculations too, and we can't recover `i` from the sigma, so we'll need it like this until a fuller refactor.

else:
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs)["prev_sample"]
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This can be made cleaner by removing the branch (replacing `t` with `i` everywhere), but for now I'm not touching the k-LMS timesteps, for ease of debugging.


# scale and decode the image latents with vae
latents = 1 / 0.18215 * latents
Expand Down
7 changes: 4 additions & 3 deletions tests/test_modeling_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -842,7 +842,7 @@ def test_ldm_text2img_fast(self):
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

@slow
@unittest.skipIf(torch_device == "cpu", "Stable diffusion is suppused to run on GPU")
@unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
def test_stable_diffusion(self):
# make sure here that pndm scheduler skips prk
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1-diffusers")
Expand All @@ -863,7 +863,7 @@ def test_stable_diffusion(self):
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

@slow
@unittest.skipIf(torch_device == "cpu", "Stable diffusion is suppused to run on GPU")
@unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
def test_stable_diffusion_fast_ddim(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1-diffusers")

Expand Down Expand Up @@ -979,7 +979,8 @@ def test_karras_ve_pipeline(self):
expected_slice = np.array([0.26815, 0.1581, 0.2658, 0.23248, 0.1550, 0.2539, 0.1131, 0.1024, 0.0837])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

# @slow
@slow
@unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
def test_lms_stable_diffusion_pipeline(self):
model_id = "CompVis/stable-diffusion-v1-1-diffusers"
pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=True)
Expand Down