diff --git a/src/diffusers/models/model_loading_utils.py b/src/diffusers/models/model_loading_utils.py index 04642ad5d401..c280f998cb36 100644 --- a/src/diffusers/models/model_loading_utils.py +++ b/src/diffusers/models/model_loading_utils.py @@ -725,8 +725,8 @@ def _caching_allocator_warmup( ) -> None: """ This function warm-ups the caching allocator based on the size of the model tensors that will reside on each - device. It allows to have one large call to Malloc, instead of recursively calling it later when loading the model, - which is actually the loading speed bottleneck. Calling this function allows to cut the model loading time by a + device. It allows having one large call to Malloc, instead of recursively calling it later when loading the model, + which is actually the loading speed bottleneck. Calling this function allows cutting the model loading time by a very large margin. """ factor = 2 if hf_quantizer is None else hf_quantizer.get_cuda_warm_up_factor() diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py index 2876798e14bd..e133a1776f31 100644 --- a/src/diffusers/pipelines/auto_pipeline.py +++ b/src/diffusers/pipelines/auto_pipeline.py @@ -966,7 +966,7 @@ def __init__(self, *args, **kwargs): @validate_hf_hub_args def from_pretrained(cls, pretrained_model_or_path, **kwargs): r""" - Instantiates a inpainting Pytorch diffusion pipeline from pretrained pipeline weight. + Instantiates an inpainting PyTorch diffusion pipeline from pretrained pipeline weight. The from_pretrained() method takes care of returning the correct pipeline class instance by: 1. Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its @@ -1121,7 +1121,7 @@ def from_pretrained(cls, pretrained_model_or_path, **kwargs): @classmethod def from_pipe(cls, pipeline, **kwargs): r""" - Instantiates a inpainting Pytorch diffusion pipeline from another instantiated diffusion pipeline class. 
+ Instantiates an inpainting PyTorch diffusion pipeline from another instantiated diffusion pipeline class. The from_pipe() method takes care of returning the correct pipeline class instance by finding the inpainting pipeline linked to the pipeline class using pattern matching on pipeline class name. diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py index 9caf50e5e333..4d3c459a9066 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py @@ -247,8 +247,8 @@ class StableDiffusionXLPAGInpaintPipeline( A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): - Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config - of `stabilityai/stable-diffusion-xl-refiner-1-0`. + Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the + config of `stabilityai/stable-diffusion-xl-refiner-1-0`. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py index 8de7d4f0bb7d..5128385f082d 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py @@ -257,8 +257,8 @@ class StableDiffusionXLInpaintPipeline( A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): - Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config - of `stabilityai/stable-diffusion-xl-refiner-1-0`. + Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the + config of `stabilityai/stable-diffusion-xl-refiner-1-0`. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py index b79119a94a0c..5ab887c2c73d 100644 --- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py @@ -153,8 +153,8 @@ class StableDiffusionXLInstructPix2PixPipeline( A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): - Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config - of `stabilityai/stable-diffusion-xl-refiner-1-0`. + Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the + config of `stabilityai/stable-diffusion-xl-refiner-1-0`. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. 
diff --git a/src/diffusers/quantizers/base.py b/src/diffusers/quantizers/base.py index b0988284b648..2d5d407bac1b 100644 --- a/src/diffusers/quantizers/base.py +++ b/src/diffusers/quantizers/base.py @@ -78,7 +78,7 @@ def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": def update_device_map(self, device_map: dict[str, Any] | None) -> dict[str, Any] | None: """ - Override this method if you want to pass a override the existing device map with a new one. E.g. for + Override this method if you want to override the existing device map with a new one. E.g. for bitsandbytes, since `accelerate` is a hard requirement, if no device_map is passed, the device_map is set to `"auto"``