Skip to content

Commit 2fcae69

Browse files
authored
Bump to 0.8.0.dev0 (huggingface#1131)
* Bump to 0.8.0.dev0 * deprecate int timesteps * style
1 parent a480229 commit 2fcae69

File tree

5 files changed

+9
-60
lines changed

5 files changed

+9
-60
lines changed

setup.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,7 @@ def run(self):
210210

211211
setup(
212212
name="diffusers",
213-
version="0.7.0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
213+
version="0.8.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
214214
description="Diffusers",
215215
long_description=open("README.md", "r", encoding="utf-8").read(),
216216
long_description_content_type="text/markdown",

src/diffusers/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
)
1010

1111

12-
__version__ = "0.7.0"
12+
__version__ = "0.8.0.dev0"
1313

1414
from .configuration_utils import ConfigMixin
1515
from .onnx_utils import OnnxRuntimeModel

src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py

+2-14
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
import torch
2020

2121
from ..configuration_utils import ConfigMixin, register_to_config
22-
from ..utils import BaseOutput, deprecate, logging
22+
from ..utils import BaseOutput, logging
2323
from .scheduling_utils import SchedulerMixin
2424

2525

@@ -253,19 +253,7 @@ def add_noise(
253253
timesteps = timesteps.to(original_samples.device)
254254

255255
schedule_timesteps = self.timesteps
256-
257-
if isinstance(timesteps, torch.IntTensor) or isinstance(timesteps, torch.LongTensor):
258-
deprecate(
259-
"timesteps as indices",
260-
"0.8.0",
261-
"Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
262-
" `EulerAncestralDiscreteScheduler.add_noise()` will not be supported in future versions. Make sure to"
263-
" pass values from `scheduler.timesteps` as timesteps.",
264-
standard_warn=False,
265-
)
266-
step_indices = timesteps
267-
else:
268-
step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
256+
step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
269257

270258
sigma = self.sigmas[step_indices].flatten()
271259
while len(sigma.shape) < len(original_samples.shape):

src/diffusers/schedulers/scheduling_euler_discrete.py

+2-14
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
import torch
2020

2121
from ..configuration_utils import ConfigMixin, register_to_config
22-
from ..utils import BaseOutput, deprecate, logging
22+
from ..utils import BaseOutput, logging
2323
from .scheduling_utils import SchedulerMixin
2424

2525

@@ -262,19 +262,7 @@ def add_noise(
262262
timesteps = timesteps.to(original_samples.device)
263263

264264
schedule_timesteps = self.timesteps
265-
266-
if isinstance(timesteps, torch.IntTensor) or isinstance(timesteps, torch.LongTensor):
267-
deprecate(
268-
"timesteps as indices",
269-
"0.8.0",
270-
"Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
271-
" `EulerDiscreteScheduler.add_noise()` will not be supported in future versions. Make sure to"
272-
" pass values from `scheduler.timesteps` as timesteps.",
273-
standard_warn=False,
274-
)
275-
step_indices = timesteps
276-
else:
277-
step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
265+
step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
278266

279267
sigma = self.sigmas[step_indices].flatten()
280268
while len(sigma.shape) < len(original_samples.shape):

src/diffusers/schedulers/scheduling_lms_discrete.py

+3-30
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
from scipy import integrate
2222

2323
from ..configuration_utils import ConfigMixin, register_to_config
24-
from ..utils import BaseOutput, deprecate
24+
from ..utils import BaseOutput
2525
from .scheduling_utils import SchedulerMixin
2626

2727

@@ -211,22 +211,7 @@ def step(
211211

212212
if isinstance(timestep, torch.Tensor):
213213
timestep = timestep.to(self.timesteps.device)
214-
if (
215-
isinstance(timestep, int)
216-
or isinstance(timestep, torch.IntTensor)
217-
or isinstance(timestep, torch.LongTensor)
218-
):
219-
deprecate(
220-
"timestep as an index",
221-
"0.8.0",
222-
"Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
223-
" `LMSDiscreteScheduler.step()` will not be supported in future versions. Make sure to pass"
224-
" one of the `scheduler.timesteps` as a timestep.",
225-
standard_warn=False,
226-
)
227-
step_index = timestep
228-
else:
229-
step_index = (self.timesteps == timestep).nonzero().item()
214+
step_index = (self.timesteps == timestep).nonzero().item()
230215
sigma = self.sigmas[step_index]
231216

232217
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
@@ -269,19 +254,7 @@ def add_noise(
269254
timesteps = timesteps.to(original_samples.device)
270255

271256
schedule_timesteps = self.timesteps
272-
273-
if isinstance(timesteps, torch.IntTensor) or isinstance(timesteps, torch.LongTensor):
274-
deprecate(
275-
"timesteps as indices",
276-
"0.8.0",
277-
"Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
278-
" `LMSDiscreteScheduler.add_noise()` will not be supported in future versions. Make sure to"
279-
" pass values from `scheduler.timesteps` as timesteps.",
280-
standard_warn=False,
281-
)
282-
step_indices = timesteps
283-
else:
284-
step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
257+
step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
285258

286259
sigma = self.sigmas[step_indices].flatten()
287260
while len(sigma.shape) < len(original_samples.shape):

0 commit comments

Comments (0)