import inspect
import re
from typing import Any, Callable, Dict, List, Optional, Union

import numpy as np
import PIL.Image
import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    PIL_INTERPOLATION,
    deprecate,
    logging,
)
from diffusers.utils.torch_utils import randn_tensor


logger = logging.get_logger(__name__)

re_attention = re.compile(
    r"""
\\\(|
\\\)|
\\\[|
\\]|
\\\\|
\\|
\(|
\[|
:([+-]?[.\d]+)\)|
\)|
]|
[^\\()\[\]:]+|
:
""",
    re.X,
)


def parse_prompt_attention(text):
    """
    Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
    Accepted tokens are:
      (abc) - increases attention to abc by a multiplier of 1.1
      (abc:3.12) - increases attention to abc by a multiplier of 3.12
      [abc] - decreases attention to abc by a multiplier of 1.1
      \\( - literal character '('
      \\[ - literal character '['
      \\) - literal character ')'
      \\] - literal character ']'
      \\ - literal character '\'
      anything else - just text
    >>> parse_prompt_attention('normal text')
    [['normal text', 1.0]]
    >>> parse_prompt_attention('an (important) word')
    [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
    >>> parse_prompt_attention('(unbalanced')
    [['unbalanced', 1.1]]
    >>> parse_prompt_attention('\\(literal\\]')
    [['(literal]', 1.0]]
    >>> parse_prompt_attention('(unnecessary)(parens)')
    [['unnecessaryparens', 1.1]]
    >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
    [['a ', 1.0],
     ['house', 1.5730000000000004],
     [' ', 1.1],
     ['on', 1.0],
     [' a ', 1.1],
     ['hill', 0.55],
     [', sun, ', 1.1],
     ['sky', 1.4641000000000006],
     ['.', 1.1]]
    """

    res = []
    round_brackets = []
    square_brackets = []

    round_bracket_multiplier = 1.1
    square_bracket_multiplier = 1 / 1.1

    def multiply_range(start_position, multiplier):
        for p in range(start_position, len(res)):
            res[p][1] *= multiplier

    for m in re_attention.finditer(text):
        text = m.group(0)
        weight = m.group(1)

        if text.startswith("\\"):
            res.append([text[1:], 1.0])
        elif text == "(":
            round_brackets.append(len(res))
        elif text == "[":
            square_brackets.append(len(res))
        elif weight is not None and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), float(weight))
        elif text == ")" and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), round_bracket_multiplier)
        elif text == "]" and len(square_brackets) > 0:
            multiply_range(square_brackets.pop(), square_bracket_multiplier)
        else:
            res.append([text, 1.0])

    for pos in round_brackets:
        multiply_range(pos, round_bracket_multiplier)

    for pos in square_brackets:
        multiply_range(pos, square_bracket_multiplier)

    if len(res) == 0:
        res = [["", 1.0]]
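
    # merge runs of identical weights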
    i = 0
    while i + 1 < len(res):
        if res[i][1] == res[i + 1][1]:
            res[i][0] += res[i + 1][0]
            res.pop(i + 1)
        else:
            i += 1

    return res


def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
    r"""
    Tokenize a list of prompts and return the tokens with the weight of each token.

    No padding, starting or ending token is included.
    """
    tokens = []
    weights = []
    truncated = False
    for text in prompt:
        texts_and_weights = parse_prompt_attention(text)
        text_token = []
        text_weight = []
        for word, weight in texts_and_weights:
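            # tokenize and discard the starting and the ending token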
            token = pipe.tokenizer(word).input_ids[1:-1]
            text_token += token
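            # copy the weight by length of token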
            text_weight += [weight] * len(token)
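            # stop if the text is too long (longer than truncation limit)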
            if len(text_token) > max_length:
                truncated = True
                break
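        # truncate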
        if len(text_token) > max_length:
            truncated = True
            text_token = text_token[:max_length]
            text_weight = text_weight[:max_length]
        tokens.append(text_token)
        weights.append(text_weight)
    if truncated:
        logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
    return tokens, weights


def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
    r"""
    Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
    """
    max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
    weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
    for i in range(len(tokens)):
        tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
        if no_boseos_middle:
            weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
        else:
            w = []
            if len(weights[i]) == 0:
                w = [1.0] * weights_length
            else:
                for j in range(max_embeddings_multiples):
                    w.append(1.0)
                    w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
                    w.append(1.0)
                w += [1.0] * (weights_length - len(w))
            weights[i] = w[:]

    return tokens, weights


def get_unweighted_text_embeddings(
    pipe: DiffusionPipeline,
    text_input: torch.Tensor,
    chunk_length: int,
    no_boseos_middle: Optional[bool] = True,
):
    """
    When the length of tokens exceeds the capacity of the text encoder, the input is split
    into chunks and each chunk is sent to the text encoder individually.
    """
    max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
    if max_embeddings_multiples > 1:
        text_embeddings = []
        for i in range(max_embeddings_multiples):
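            # extract the i-th chunk; the two extra positions are overwritten with bos/eos below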
            text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()
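
            # cover the head and the tail by the starting and the ending tokens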
            text_input_chunk[:, 0] = text_input[0, 0]
            text_input_chunk[:, -1] = text_input[0, -1]
            text_embedding = pipe.text_encoder(text_input_chunk)[0]

            if no_boseos_middle:
                if i == 0:
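                    # discard the ending token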
                    text_embedding = text_embedding[:, :-1]
                elif i == max_embeddings_multiples - 1:
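                    # discard the starting token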
                    text_embedding = text_embedding[:, 1:]
                else:
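                    # discard both starting and ending tokens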
                    text_embedding = text_embedding[:, 1:-1]

            text_embeddings.append(text_embedding)
        text_embeddings = torch.concat(text_embeddings, axis=1)
    else:
        text_embeddings = pipe.text_encoder(text_input)[0]
    return text_embeddings


def get_weighted_text_embeddings(
    pipe: DiffusionPipeline,
    prompt: Union[str, List[str]],
    uncond_prompt: Optional[Union[str, List[str]]] = None,
    max_embeddings_multiples: Optional[int] = 3,
    no_boseos_middle: Optional[bool] = False,
    skip_parsing: Optional[bool] = False,
    skip_weighting: Optional[bool] = False,
):
    r"""
    Prompts can be assigned with local weights using brackets. For example,
    prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
    and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.

    Also, to regularize the embedding, the weighted embedding is rescaled to preserve the original mean.

    Args:
        pipe (`DiffusionPipeline`):
            Pipe to provide access to the tokenizer and the text encoder.
        prompt (`str` or `List[str]`):
            The prompt or prompts to guide the image generation.
        uncond_prompt (`str` or `List[str]`):
            The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
            is provided, the embeddings of prompt and uncond_prompt are concatenated.
        max_embeddings_multiples (`int`, *optional*, defaults to `3`):
            The max multiple length of prompt embeddings compared to the max output length of text encoder.
        no_boseos_middle (`bool`, *optional*, defaults to `False`):
            When the tokenized text is longer than the capacity of the text encoder and is split into chunks,
            whether to keep the starting and ending tokens of each middle chunk.
        skip_parsing (`bool`, *optional*, defaults to `False`):
            Skip the parsing of brackets.
        skip_weighting (`bool`, *optional*, defaults to `False`):
            Skip the weighting. When parsing is skipped, this is forced to `True`.
    """
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
    if isinstance(prompt, str):
        prompt = [prompt]

    if not skip_parsing:
        prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
    else:
        prompt_tokens = [
            token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
        ]
        prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens = [
                token[1:-1]
                for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
            ]
            uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
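
    # round up the longest length of tokens to a multiple of (model_max_length - 2)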
    max_length = max([len(token) for token in prompt_tokens])
    if uncond_prompt is not None:
        max_length = max(max_length, max([len(token) for token in uncond_tokens]))

    max_embeddings_multiples = min(
        max_embeddings_multiples,
        (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
    )
    max_embeddings_multiples = max(1, max_embeddings_multiples)
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
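
    # pad the length of tokens and weights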
    bos = pipe.tokenizer.bos_token_id
    eos = pipe.tokenizer.eos_token_id
    pad = getattr(pipe.tokenizer, "pad_token_id", eos)
    prompt_tokens, prompt_weights = pad_tokens_and_weights(
        prompt_tokens,
        prompt_weights,
        max_length,
        bos,
        eos,
        pad,
        no_boseos_middle=no_boseos_middle,
        chunk_length=pipe.tokenizer.model_max_length,
    )
    prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
    if uncond_prompt is not None:
        uncond_tokens, uncond_weights = pad_tokens_and_weights(
            uncond_tokens,
            uncond_weights,
            max_length,
            bos,
            eos,
            pad,
            no_boseos_middle=no_boseos_middle,
            chunk_length=pipe.tokenizer.model_max_length,
        )
        uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)
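
    # get the embeddings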
    text_embeddings = get_unweighted_text_embeddings(
        pipe,
        prompt_tokens,
        pipe.tokenizer.model_max_length,
        no_boseos_middle=no_boseos_middle,
    )
    prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device)
    if uncond_prompt is not None:
        uncond_embeddings = get_unweighted_text_embeddings(
            pipe,
            uncond_tokens,
            pipe.tokenizer.model_max_length,
            no_boseos_middle=no_boseos_middle,
        )
        uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device)
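
    # assign weights to the prompts and normalize in the sense of mean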
    if (not skip_parsing) and (not skip_weighting):
        previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
        text_embeddings *= prompt_weights.unsqueeze(-1)
        current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
        text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
        if uncond_prompt is not None:
            previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
            uncond_embeddings *= uncond_weights.unsqueeze(-1)
            current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
            uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)

    if uncond_prompt is not None:
        return text_embeddings, uncond_embeddings
    return text_embeddings, None
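

# Minimal usage sketch (illustrative only): assumes `pipe` is an already-loaded pipeline that
# exposes a CLIP `tokenizer` and `text_encoder`, e.g. an instance of the pipeline class below.
#
#     prompt_embeds, negative_embeds = get_weighted_text_embeddings(
#         pipe,
#         prompt="a photo of an (astronaut:1.2) riding a [horse]",
#         uncond_prompt="blurry, low quality",
#         max_embeddings_multiples=3,
#     )
#
# `prompt_embeds` weights the 'astronaut' tokens up by 1.2 and the 'horse' tokens down by 1/1.1;
# `negative_embeds` holds the embedding of the unconditional prompt for classifier-free guidance.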


def preprocess_image(image, batch_size):
    w, h = image.size
    w, h = (x - x % 8 for x in (w, h))
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


def preprocess_mask(mask, batch_size, scale_factor=8):
    if not isinstance(mask, torch.Tensor):
        mask = mask.convert("L")
        w, h = mask.size
        w, h = (x - x % 8 for x in (w, h))
        mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
        mask = np.array(mask).astype(np.float32) / 255.0
        mask = np.tile(mask, (4, 1, 1))
        mask = np.vstack([mask[None]] * batch_size)
        mask = 1 - mask
        mask = torch.from_numpy(mask)
        return mask
    else:
        valid_mask_channel_sizes = [1, 3]
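        # if the mask channel dimension is last (B, H, W, C), permute to the PyTorch standard (B, C, H, W)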
        if mask.shape[3] in valid_mask_channel_sizes:
            mask = mask.permute(0, 3, 1, 2)
        elif mask.shape[1] not in valid_mask_channel_sizes:
            raise ValueError(
                f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension,"
                f" but received mask of shape {tuple(mask.shape)}"
            )
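        # (potentially) reduce the mask channel dimension from 3 to 1 for broadcasting to the latent shape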
        mask = mask.mean(dim=1, keepdim=True)
        h, w = mask.shape[-2:]
        h, w = (x - x % 8 for x in (h, w))
        mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor))
        return mask


class StableDiffusionLongPromptWeightingPipeline(
    DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion without a token length limit, with support for
    parsing weighting in the prompt.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
        feature_extractor ([`CLIPImageProcessor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    model_cpu_offload_seq = "text_encoder->unet->vae"
    _optional_components = ["safety_checker", "feature_extractor"]
    _exclude_from_cpu_offload = ["safety_checker"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has `clip_sample` set to True."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly, as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `safety_checker=None` instead."
            )

        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)

        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.register_to_config(
            requires_safety_checker=requires_safety_checker,
        )

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        max_embeddings_multiples=3,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
        """
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if negative_prompt_embeds is None:
            if negative_prompt is None:
                negative_prompt = [""] * batch_size
            elif isinstance(negative_prompt, str):
                negative_prompt = [negative_prompt] * batch_size
            if batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
        if prompt_embeds is None or negative_prompt_embeds is None:
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
                if do_classifier_free_guidance and negative_prompt_embeds is None:
                    negative_prompt = self.maybe_convert_prompt(negative_prompt, self.tokenizer)

            prompt_embeds1, negative_prompt_embeds1 = get_weighted_text_embeddings(
                pipe=self,
                prompt=prompt,
                uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
                max_embeddings_multiples=max_embeddings_multiples,
            )
            if prompt_embeds is None:
                prompt_embeds = prompt_embeds1
            if negative_prompt_embeds is None:
                negative_prompt_embeds = negative_prompt_embeds1

        bs_embed, seq_len, _ = prompt_embeds.shape
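        # duplicate text embeddings for each generation per prompt, using mps friendly method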
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        if do_classifier_free_guidance:
            bs_embed, seq_len, _ = negative_prompt_embeds.shape
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    def check_inputs(
        self,
        prompt,
        height,
        width,
        strength,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    def get_timesteps(self, num_inference_steps, strength, device, is_text2img):
        if is_text2img:
            return self.scheduler.timesteps.to(device), num_inference_steps
        else:
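            # get the original timestep using init_timestep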
            init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

            t_start = max(num_inference_steps - init_timestep, 0)
            timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]

            return timesteps, num_inference_steps - t_start

    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        else:
            has_nsfw_concept = None
        return image, has_nsfw_concept

    def decode_latents(self, latents):
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
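        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16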
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    def prepare_extra_step_kwargs(self, generator, eta):
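        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]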
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
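
        # check if the scheduler accepts generator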
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def prepare_latents(
        self,
        image,
        timestep,
        num_images_per_prompt,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        device,
        generator,
        latents=None,
    ):
        if image is None:
            batch_size = batch_size * num_images_per_prompt
            shape = (
                batch_size,
                num_channels_latents,
                int(height) // self.vae_scale_factor,
                int(width) // self.vae_scale_factor,
            )
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            if latents is None:
                latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            else:
                latents = latents.to(device)
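
            # scale the initial noise by the standard deviation required by the scheduler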
            latents = latents * self.scheduler.init_noise_sigma
            return latents, None, None
        else:
            image = image.to(device=self.device, dtype=dtype)
            init_latent_dist = self.vae.encode(image).latent_dist
            init_latents = init_latent_dist.sample(generator=generator)
            init_latents = self.vae.config.scaling_factor * init_latents
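
            # duplicate init_latents for each requested image per prompt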
            init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0)
            init_latents_orig = init_latents
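
            # add noise to latents using the timesteps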
            noise = randn_tensor(init_latents.shape, generator=generator, device=self.device, dtype=dtype)
            init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
            latents = init_latents
            return latents, init_latents_orig, noise
						|  |  | 
					
						
						|  | @torch.no_grad() | 
					
						
						|  | def __call__( | 
					
						
						|  | self, | 
					
						
						|  | prompt: Union[str, List[str]], | 
					
						
						|  | negative_prompt: Optional[Union[str, List[str]]] = None, | 
					
						
						|  | image: Union[torch.Tensor, PIL.Image.Image] = None, | 
					
						
						|  | mask_image: Union[torch.Tensor, PIL.Image.Image] = None, | 
					
						
						|  | height: int = 512, | 
					
						
						|  | width: int = 512, | 
					
						
						|  | num_inference_steps: int = 50, | 
					
						
						|  | guidance_scale: float = 7.5, | 
					
						
						|  | strength: float = 0.8, | 
					
						
						|  | num_images_per_prompt: Optional[int] = 1, | 
					
						
						|  | add_predicted_noise: Optional[bool] = False, | 
					
						
						|  | eta: float = 0.0, | 
					
						
						|  | generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, | 
					
						
						|  | latents: Optional[torch.Tensor] = None, | 
					
						
						|  | prompt_embeds: Optional[torch.Tensor] = None, | 
					
						
						|  | negative_prompt_embeds: Optional[torch.Tensor] = None, | 
					
						
						|  | max_embeddings_multiples: Optional[int] = 3, | 
					
						
						|  | output_type: Optional[str] = "pil", | 
					
						
						|  | return_dict: bool = True, | 
					
						
						|  | callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, | 
					
						
						|  | is_cancelled_callback: Optional[Callable[[], bool]] = None, | 
					
						
						|  | callback_steps: int = 1, | 
					
						
						|  | cross_attention_kwargs: Optional[Dict[str, Any]] = None, | 
					
						
						|  | ): | 
					
						
						|  | r""" | 
					
						
						|  | Function invoked when calling the pipeline for generation. | 
					
						
						|  |  | 
					
						
						|  | Args: | 
					
						
						|  | prompt (`str` or `List[str]`): | 
					
						
						|  | The prompt or prompts to guide the image generation. | 
					
						
						|  | negative_prompt (`str` or `List[str]`, *optional*): | 
					
						
						|  | The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored | 
					
						
						|  | if `guidance_scale` is less than `1`). | 
					
						
						|  | image (`torch.Tensor` or `PIL.Image.Image`): | 
					
						
						|  | `Image`, or tensor representing an image batch, that will be used as the starting point for the | 
					
						
						|  | process. | 
					
						
						|  | mask_image (`torch.Tensor` or `PIL.Image.Image`): | 
					
						
						|  | `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be | 
					
						
						|  | replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a | 
					
						
						|  | PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should | 
					
						
						|  | contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. | 
					
						
						|  | height (`int`, *optional*, defaults to 512): | 
					
						
						|  | The height in pixels of the generated image. | 
					
						
						|  | width (`int`, *optional*, defaults to 512): | 
					
						
						|  | The width in pixels of the generated image. | 
					
						
						|  | num_inference_steps (`int`, *optional*, defaults to 50): | 
					
						
						|  | The number of denoising steps. More denoising steps usually lead to a higher quality image at the | 
					
						
						|  | expense of slower inference. | 
					
						
						|  | guidance_scale (`float`, *optional*, defaults to 7.5): | 
					
						
						|  | Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). | 
					
						
						|  | `guidance_scale` is defined as `w` of equation 2. of [Imagen | 
					
						
						|  | Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > | 
					
						
						|  | 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, | 
					
						
						|  | usually at the expense of lower image quality. | 
					
						
						|  | strength (`float`, *optional*, defaults to 0.8): | 
					
						
						|  | Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. | 
					
						
						|  | `image` will be used as a starting point, adding more noise to it the larger the `strength`. The | 
					
						
						|  | number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added | 
					
						
						|  | noise will be maximum and the denoising process will run for the full number of iterations specified in | 
					
						
						|  | `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. | 
					
						
						|  | num_images_per_prompt (`int`, *optional*, defaults to 1): | 
					
						
						|  | The number of images to generate per prompt. | 
					
						
						|  | add_predicted_noise (`bool`, *optional*, defaults to True): | 
					
						
						|  | Use predicted noise instead of random noise when constructing noisy versions of the original image in | 
					
						
						|  | the reverse diffusion process | 
					
						
						|  | eta (`float`, *optional*, defaults to 0.0): | 
					
						
						|  | Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to | 
					
						
						|  | [`schedulers.DDIMScheduler`], will be ignored for others. | 
					
						
						|  | generator (`torch.Generator` or `List[torch.Generator]`, *optional*): | 
					
						
						|  | One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) | 
					
						
						|  | to make generation deterministic. | 
					
						
						|  | latents (`torch.Tensor`, *optional*): | 
					
						
						|  | Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | 
					
						
						|  | generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | 
					
						
						|  | tensor will ge generated by sampling using the supplied random `generator`. | 
					
						
						|  | prompt_embeds (`torch.Tensor`, *optional*): | 
					
						
						|  | Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not | 
					
						
						|  | provided, text embeddings will be generated from `prompt` input argument. | 
					
						
						|  | negative_prompt_embeds (`torch.Tensor`, *optional*): | 
					
						
						|  | Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt | 
					
						
						|  | weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input | 
					
						
						|  | argument. | 
					
						
						|  | max_embeddings_multiples (`int`, *optional*, defaults to `3`): | 
					
						
						|  | The max multiple length of prompt embeddings compared to the max output length of text encoder. | 
					
						
						|  | output_type (`str`, *optional*, defaults to `"pil"`): | 
					
						
						|  | The output format of the generate image. Choose between | 
					
						
						|  | [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. | 
					
						
						|  | return_dict (`bool`, *optional*, defaults to `True`): | 
					
						
						|  | Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a | 
					
						
						|  | plain tuple. | 
					
						
						|  | callback (`Callable`, *optional*): | 
					
						
						|  | A function that will be called every `callback_steps` steps during inference. The function will be | 
					
						
						|  | called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. | 
					
						
						|  | is_cancelled_callback (`Callable`, *optional*): | 
					
						
						|  | A function that will be called every `callback_steps` steps during inference. If the function returns | 
					
						
						|  | `True`, the inference will be cancelled. | 
					
						
						|  | callback_steps (`int`, *optional*, defaults to 1): | 
					
						
						|  | The frequency at which the `callback` function will be called. If not specified, the callback will be | 
					
						
						|  | called at every step. | 
					
						
						|  | cross_attention_kwargs (`dict`, *optional*): | 
					
						
						|  | A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under | 
					
						
						|  | `self.processor` in | 
					
						
						|  | [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). | 
					
						
						|  |  | 
					
						
						|  | Returns: | 
					
						
						|  | `None` if cancelled by `is_cancelled_callback`, | 
					
						
						|  | [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: | 
					
						
						|  | [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. | 
					
						
						|  | When returning a tuple, the first element is a list with the generated images, and the second element is a | 
					
						
						|  | list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" | 
					
						
						|  | (nsfw) content, according to the `safety_checker`. | 
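|  |  | 
|  | Examples: | 
|  | A minimal sketch of a weighted-prompt text-to-image call, assuming this file is loaded as the | 
|  | `lpw_stable_diffusion` community pipeline; the model id and prompt are illustrative only. | 
|  |  | 
|  | ```py | 
|  | >>> pipe = DiffusionPipeline.from_pretrained( | 
|  | ...     "runwayml/stable-diffusion-v1-5", custom_pipeline="lpw_stable_diffusion" | 
|  | ... ) | 
|  | >>> # (word:1.4) up-weights a prompt fragment, [word] down-weights it | 
|  | >>> image = pipe(prompt="a (red:1.4) rose, [blurry]", num_inference_steps=30).images[0] | 
|  | ``` | 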
					
						
						|  | """ | 
					
						
						|  |  | 
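|  | # 0. Default height and width to unet | 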
					
						
						|  | height = height or self.unet.config.sample_size * self.vae_scale_factor | 
					
						
						|  | width = width or self.unet.config.sample_size * self.vae_scale_factor | 
					
						
						|  |  | 
					
						
						|  |  | 
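|  | # 1. Check inputs. Raise error if not correct | 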
					
						
						|  | self.check_inputs( | 
					
						
						|  | prompt, height, width, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds | 
					
						
						|  | ) | 
					
						
						|  |  | 
					
						
						|  |  | 
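|  | # 2. Define call parameters | 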
					
						
						|  | if prompt is not None and isinstance(prompt, str): | 
					
						
						|  | batch_size = 1 | 
					
						
						|  | elif prompt is not None and isinstance(prompt, list): | 
					
						
						|  | batch_size = len(prompt) | 
					
						
						|  | else: | 
					
						
						|  | batch_size = prompt_embeds.shape[0] | 
					
						
						|  |  | 
					
						
						|  | device = self._execution_device | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
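|  | # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2) | 
|  | # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` | 
|  | # corresponds to doing no classifier-free guidance. | 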
					
						
						|  | do_classifier_free_guidance = guidance_scale > 1.0 | 
					
						
						|  |  | 
					
						
						|  |  | 
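|  | # 3. Encode input prompt | 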
					
						
						|  | prompt_embeds = self._encode_prompt( | 
					
						
						|  | prompt, | 
					
						
						|  | device, | 
					
						
						|  | num_images_per_prompt, | 
					
						
						|  | do_classifier_free_guidance, | 
					
						
						|  | negative_prompt, | 
					
						
						|  | max_embeddings_multiples, | 
					
						
						|  | prompt_embeds=prompt_embeds, | 
					
						
						|  | negative_prompt_embeds=negative_prompt_embeds, | 
					
						
						|  | ) | 
					
						
						|  | dtype = prompt_embeds.dtype | 
					
						
						|  |  | 
					
						
						|  |  | 
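|  | # 4. Preprocess image and mask | 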
					
						
						|  | if isinstance(image, PIL.Image.Image): | 
					
						
						|  | image = preprocess_image(image, batch_size) | 
					
						
						|  | if image is not None: | 
					
						
						|  | image = image.to(device=self.device, dtype=dtype) | 
					
						
						|  | if isinstance(mask_image, PIL.Image.Image): | 
					
						
						|  | mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor) | 
					
						
						|  | if mask_image is not None: | 
					
						
						|  | mask = mask_image.to(device=self.device, dtype=dtype) | 
					
						
						|  | mask = torch.cat([mask] * num_images_per_prompt) | 
					
						
						|  | else: | 
					
						
						|  | mask = None | 
					
						
						|  |  | 
					
						
						|  |  | 
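|  | # 5. Set timesteps (img2img/inpaint skip the first steps according to `strength`) | 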
					
						
						|  | self.scheduler.set_timesteps(num_inference_steps, device=device) | 
					
						
						|  | timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None) | 
					
						
						|  | latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) | 
					
						
						|  |  | 
					
						
						|  |  | 
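|  | # 6. Prepare latent variables | 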
					
						
						|  | latents, init_latents_orig, noise = self.prepare_latents( | 
					
						
						|  | image, | 
					
						
						|  | latent_timestep, | 
					
						
						|  | num_images_per_prompt, | 
					
						
						|  | batch_size, | 
					
						
						|  | self.unet.config.in_channels, | 
					
						
						|  | height, | 
					
						
						|  | width, | 
					
						
						|  | dtype, | 
					
						
						|  | device, | 
					
						
						|  | generator, | 
					
						
						|  | latents, | 
					
						
						|  | ) | 
					
						
						|  |  | 
					
						
						|  |  | 
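|  | # 7. Prepare extra step kwargs (e.g. `eta` for DDIM-style schedulers) | 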
					
						
						|  | extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) | 
					
						
						|  |  | 
					
						
						|  |  | 
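|  | # 8. Denoising loop | 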
					
						
						|  | num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order | 
					
						
						|  | with self.progress_bar(total=num_inference_steps) as progress_bar: | 
					
						
						|  | for i, t in enumerate(timesteps): | 
					
						
						|  |  | 
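|  | # expand the latents if we are doing classifier-free guidance | 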
					
						
						|  | latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents | 
					
						
						|  | latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) | 
					
						
						|  |  | 
					
						
						|  |  | 
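|  | # predict the noise residual | 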
					
						
						|  | noise_pred = self.unet( | 
					
						
						|  | latent_model_input, | 
					
						
						|  | t, | 
					
						
						|  | encoder_hidden_states=prompt_embeds, | 
					
						
						|  | cross_attention_kwargs=cross_attention_kwargs, | 
					
						
						|  | ).sample | 
					
						
						|  |  | 
					
						
						|  |  | 
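|  | # perform guidance: noise_pred = uncond + guidance_scale * (text - uncond) | 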
					
						
						|  | if do_classifier_free_guidance: | 
					
						
						|  | noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) | 
					
						
						|  | noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) | 
					
						
						|  |  | 
					
						
						|  |  | 
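|  | # compute the previous noisy sample x_t -> x_t-1 | 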
					
						
						|  | latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample | 
					
						
						|  |  | 
					
						
						|  | if mask is not None: | 
					
						
						|  |  | 
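|  | # masking: re-noise the original latents to the current timestep and keep them outside | 
|  | # the masked region (note: `noise_pred_uncond` only exists under classifier-free guidance) | 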
					
						
						|  | if add_predicted_noise: | 
					
						
						|  | init_latents_proper = self.scheduler.add_noise( | 
					
						
						|  | init_latents_orig, noise_pred_uncond, torch.tensor([t]) | 
					
						
						|  | ) | 
					
						
						|  | else: | 
					
						
						|  | init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t])) | 
					
						
						|  | latents = (init_latents_proper * mask) + (latents * (1 - mask)) | 
					
						
						|  |  | 
					
						
						|  |  | 
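|  | # update the progress bar and call the callbacks, if provided | 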
					
						
						|  | if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): | 
					
						
						|  | progress_bar.update() | 
					
						
						|  | if i % callback_steps == 0: | 
					
						
						|  | if callback is not None: | 
					
						
						|  | step_idx = i // getattr(self.scheduler, "order", 1) | 
					
						
						|  | callback(step_idx, t, latents) | 
					
						
						|  | if is_cancelled_callback is not None and is_cancelled_callback(): | 
					
						
						|  | return None | 
					
						
						|  |  | 
					
						
						|  | if output_type == "latent": | 
					
						
						|  | image = latents | 
					
						
						|  | has_nsfw_concept = None | 
					
						
						|  | elif output_type == "pil": | 
					
						
						|  |  | 
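|  | # 9. Post-processing: decode latents into a numpy image | 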
					
						
						|  | image = self.decode_latents(latents) | 
					
						
						|  |  | 
					
						
						|  |  | 
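|  | # 10. Run safety checker | 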
					
						
						|  | image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) | 
					
						
						|  |  | 
					
						
						|  |  | 
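|  | # 11. Convert to PIL | 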
					
						
						|  | image = self.numpy_to_pil(image) | 
					
						
						|  | else: | 
					
						
						|  |  | 
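|  | # 9. Post-processing: decode latents, keeping the numpy output | 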
					
						
						|  | image = self.decode_latents(latents) | 
					
						
						|  |  | 
					
						
						|  |  | 
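|  | # 10. Run safety checker | 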
					
						
						|  | image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) | 
					
						
						|  |  | 
					
						
						|  |  | 
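|  | # Offload last model to CPU if a final offload hook was registered | 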
					
						
						|  | if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: | 
					
						
						|  | self.final_offload_hook.offload() | 
					
						
						|  |  | 
					
						
						|  | if not return_dict: | 
					
						
						|  | return image, has_nsfw_concept | 
					
						
						|  |  | 
					
						
						|  | return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) | 
					
						
						|  |  | 
					
						
						|  | def text2img( | 
					
						
						|  | self, | 
					
						
						|  | prompt: Union[str, List[str]], | 
					
						
						|  | negative_prompt: Optional[Union[str, List[str]]] = None, | 
					
						
						|  | height: int = 512, | 
					
						
						|  | width: int = 512, | 
					
						
						|  | num_inference_steps: int = 50, | 
					
						
						|  | guidance_scale: float = 7.5, | 
					
						
						|  | num_images_per_prompt: Optional[int] = 1, | 
					
						
						|  | eta: float = 0.0, | 
					
						
						|  | generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, | 
					
						
						|  | latents: Optional[torch.Tensor] = None, | 
					
						
						|  | prompt_embeds: Optional[torch.Tensor] = None, | 
					
						
						|  | negative_prompt_embeds: Optional[torch.Tensor] = None, | 
					
						
						|  | max_embeddings_multiples: Optional[int] = 3, | 
					
						
						|  | output_type: Optional[str] = "pil", | 
					
						
						|  | return_dict: bool = True, | 
					
						
						|  | callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, | 
					
						
						|  | is_cancelled_callback: Optional[Callable[[], bool]] = None, | 
					
						
						|  | callback_steps: int = 1, | 
					
						
						|  | cross_attention_kwargs: Optional[Dict[str, Any]] = None, | 
					
						
						|  | ): | 
					
						
						|  | r""" | 
					
						
						|  | Function for text-to-image generation. | 
					
						
						|  | Args: | 
					
						
						|  | prompt (`str` or `List[str]`): | 
					
						
						|  | The prompt or prompts to guide the image generation. | 
					
						
						|  | negative_prompt (`str` or `List[str]`, *optional*): | 
					
						
						|  | The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored | 
					
						
						|  | if `guidance_scale` is less than `1`). | 
					
						
						|  | height (`int`, *optional*, defaults to 512): | 
					
						
						|  | The height in pixels of the generated image. | 
					
						
						|  | width (`int`, *optional*, defaults to 512): | 
					
						
						|  | The width in pixels of the generated image. | 
					
						
						|  | num_inference_steps (`int`, *optional*, defaults to 50): | 
					
						
						|  | The number of denoising steps. More denoising steps usually lead to a higher quality image at the | 
					
						
						|  | expense of slower inference. | 
					
						
						|  | guidance_scale (`float`, *optional*, defaults to 7.5): | 
					
						
						|  | Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). | 
					
						
|  | `guidance_scale` is defined as `w` of equation 2 of the [Imagen | 
					
						
						|  | Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > | 
					
						
|  | 1`. Higher guidance scale encourages the model to generate images closely linked to the text `prompt`, | 
					
						
						|  | usually at the expense of lower image quality. | 
					
						
						|  | num_images_per_prompt (`int`, *optional*, defaults to 1): | 
					
						
						|  | The number of images to generate per prompt. | 
					
						
						|  | eta (`float`, *optional*, defaults to 0.0): | 
					
						
						|  | Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to | 
					
						
|  | [`schedulers.DDIMScheduler`]; it is ignored for other schedulers. | 
					
						
						|  | generator (`torch.Generator` or `List[torch.Generator]`, *optional*): | 
					
						
						|  | One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) | 
					
						
						|  | to make generation deterministic. | 
					
						
						|  | latents (`torch.Tensor`, *optional*): | 
					
						
						|  | Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | 
					
						
						|  | generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | 
					
						
|  | tensor will be generated by sampling using the supplied random `generator`. | 
					
						
						|  | prompt_embeds (`torch.Tensor`, *optional*): | 
					
						
						|  | Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not | 
					
						
|  | provided, text embeddings will be generated from the `prompt` input argument. | 
					
						
						|  | negative_prompt_embeds (`torch.Tensor`, *optional*): | 
					
						
						|  | Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt | 
					
						
|  | weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input | 
					
						
						|  | argument. | 
					
						
						|  | max_embeddings_multiples (`int`, *optional*, defaults to `3`): | 
					
						
|  | The maximum length of the prompt embeddings, expressed as a multiple of the text encoder's maximum output length. | 
					
						
						|  | output_type (`str`, *optional*, defaults to `"pil"`): | 
					
						
|  | The output format of the generated image. Choose between | 
					
						
						|  | [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. | 
					
						
						|  | return_dict (`bool`, *optional*, defaults to `True`): | 
					
						
						|  | Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a | 
					
						
						|  | plain tuple. | 
					
						
						|  | callback (`Callable`, *optional*): | 
					
						
						|  | A function that will be called every `callback_steps` steps during inference. The function will be | 
					
						
						|  | called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. | 
					
						
						|  | is_cancelled_callback (`Callable`, *optional*): | 
					
						
						|  | A function that will be called every `callback_steps` steps during inference. If the function returns | 
					
						
						|  | `True`, the inference will be cancelled. | 
					
						
						|  | callback_steps (`int`, *optional*, defaults to 1): | 
					
						
						|  | The frequency at which the `callback` function will be called. If not specified, the callback will be | 
					
						
						|  | called at every step. | 
					
						
						|  | cross_attention_kwargs (`dict`, *optional*): | 
					
						
|  | A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under | 
					
						
						|  | `self.processor` in | 
					
						
						|  | [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). | 
					
						
						|  |  | 
					
						
						|  | Returns: | 
					
						
|  | `None` if cancelled by `is_cancelled_callback`; otherwise, a | 
|  | [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is `True`, or a plain | 
|  | `tuple` if `return_dict` is `False`. | 
					
						
						|  | When returning a tuple, the first element is a list with the generated images, and the second element is a | 
					
						
						|  | list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" | 
					
						
						|  | (nsfw) content, according to the `safety_checker`. | 
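|  |  | 
|  | Examples: | 
|  | A minimal sketch, assuming `pipe` was loaded with this file as its custom pipeline (see | 
|  | `__call__`); the prompt is illustrative only. | 
|  |  | 
|  | ```py | 
|  | >>> image = pipe.text2img("a (sunset:1.3) over the sea", width=512, height=512).images[0] | 
|  | ``` | 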
					
						
						|  | """ | 
					
						
						|  | return self.__call__( | 
					
						
						|  | prompt=prompt, | 
					
						
						|  | negative_prompt=negative_prompt, | 
					
						
						|  | height=height, | 
					
						
						|  | width=width, | 
					
						
						|  | num_inference_steps=num_inference_steps, | 
					
						
						|  | guidance_scale=guidance_scale, | 
					
						
						|  | num_images_per_prompt=num_images_per_prompt, | 
					
						
						|  | eta=eta, | 
					
						
						|  | generator=generator, | 
					
						
						|  | latents=latents, | 
					
						
						|  | prompt_embeds=prompt_embeds, | 
					
						
						|  | negative_prompt_embeds=negative_prompt_embeds, | 
					
						
						|  | max_embeddings_multiples=max_embeddings_multiples, | 
					
						
						|  | output_type=output_type, | 
					
						
						|  | return_dict=return_dict, | 
					
						
						|  | callback=callback, | 
					
						
						|  | is_cancelled_callback=is_cancelled_callback, | 
					
						
						|  | callback_steps=callback_steps, | 
					
						
						|  | cross_attention_kwargs=cross_attention_kwargs, | 
					
						
						|  | ) | 
					
						
						|  |  | 
					
						
						|  | def img2img( | 
					
						
						|  | self, | 
					
						
						|  | image: Union[torch.Tensor, PIL.Image.Image], | 
					
						
						|  | prompt: Union[str, List[str]], | 
					
						
						|  | negative_prompt: Optional[Union[str, List[str]]] = None, | 
					
						
						|  | strength: float = 0.8, | 
					
						
						|  | num_inference_steps: Optional[int] = 50, | 
					
						
						|  | guidance_scale: Optional[float] = 7.5, | 
					
						
						|  | num_images_per_prompt: Optional[int] = 1, | 
					
						
						|  | eta: Optional[float] = 0.0, | 
					
						
						|  | generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, | 
					
						
						|  | prompt_embeds: Optional[torch.Tensor] = None, | 
					
						
						|  | negative_prompt_embeds: Optional[torch.Tensor] = None, | 
					
						
						|  | max_embeddings_multiples: Optional[int] = 3, | 
					
						
						|  | output_type: Optional[str] = "pil", | 
					
						
						|  | return_dict: bool = True, | 
					
						
						|  | callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, | 
					
						
						|  | is_cancelled_callback: Optional[Callable[[], bool]] = None, | 
					
						
						|  | callback_steps: int = 1, | 
					
						
						|  | cross_attention_kwargs: Optional[Dict[str, Any]] = None, | 
					
						
						|  | ): | 
					
						
						|  | r""" | 
					
						
						|  | Function for image-to-image generation. | 
					
						
						|  | Args: | 
					
						
						|  | image (`torch.Tensor` or `PIL.Image.Image`): | 
					
						
						|  | `Image`, or tensor representing an image batch, that will be used as the starting point for the | 
					
						
						|  | process. | 
					
						
						|  | prompt (`str` or `List[str]`): | 
					
						
						|  | The prompt or prompts to guide the image generation. | 
					
						
						|  | negative_prompt (`str` or `List[str]`, *optional*): | 
					
						
						|  | The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored | 
					
						
						|  | if `guidance_scale` is less than `1`). | 
					
						
						|  | strength (`float`, *optional*, defaults to 0.8): | 
					
						
						|  | Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. | 
					
						
						|  | `image` will be used as a starting point, adding more noise to it the larger the `strength`. The | 
					
						
						|  | number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added | 
					
						
						|  | noise will be maximum and the denoising process will run for the full number of iterations specified in | 
					
						
						|  | `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. | 
					
						
						|  | num_inference_steps (`int`, *optional*, defaults to 50): | 
					
						
						|  | The number of denoising steps. More denoising steps usually lead to a higher quality image at the | 
					
						
						|  | expense of slower inference. This parameter will be modulated by `strength`. | 
					
						
						|  | guidance_scale (`float`, *optional*, defaults to 7.5): | 
					
						
						|  | Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). | 
					
						
|  | `guidance_scale` is defined as `w` of equation 2 of the [Imagen | 
					
						
						|  | Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > | 
					
						
|  | 1`. Higher guidance scale encourages the model to generate images closely linked to the text `prompt`, | 
					
						
						|  | usually at the expense of lower image quality. | 
					
						
						|  | num_images_per_prompt (`int`, *optional*, defaults to 1): | 
					
						
						|  | The number of images to generate per prompt. | 
					
						
						|  | eta (`float`, *optional*, defaults to 0.0): | 
					
						
						|  | Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to | 
					
						
|  | [`schedulers.DDIMScheduler`]; it is ignored for other schedulers. | 
					
						
						|  | generator (`torch.Generator` or `List[torch.Generator]`, *optional*): | 
					
						
						|  | One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) | 
					
						
						|  | to make generation deterministic. | 
					
						
						|  | prompt_embeds (`torch.Tensor`, *optional*): | 
					
						
						|  | Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not | 
					
						
|  | provided, text embeddings will be generated from the `prompt` input argument. | 
					
						
						|  | negative_prompt_embeds (`torch.Tensor`, *optional*): | 
					
						
						|  | Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt | 
					
						
|  | weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input | 
					
						
						|  | argument. | 
					
						
						|  | max_embeddings_multiples (`int`, *optional*, defaults to `3`): | 
					
						
|  | The maximum length of the prompt embeddings, expressed as a multiple of the text encoder's maximum output length. | 
					
						
						|  | output_type (`str`, *optional*, defaults to `"pil"`): | 
					
						
|  | The output format of the generated image. Choose between | 
					
						
						|  | [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. | 
					
						
						|  | return_dict (`bool`, *optional*, defaults to `True`): | 
					
						
						|  | Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a | 
					
						
						|  | plain tuple. | 
					
						
						|  | callback (`Callable`, *optional*): | 
					
						
						|  | A function that will be called every `callback_steps` steps during inference. The function will be | 
					
						
						|  | called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. | 
					
						
						|  | is_cancelled_callback (`Callable`, *optional*): | 
					
						
						|  | A function that will be called every `callback_steps` steps during inference. If the function returns | 
					
						
						|  | `True`, the inference will be cancelled. | 
					
						
						|  | callback_steps (`int`, *optional*, defaults to 1): | 
					
						
						|  | The frequency at which the `callback` function will be called. If not specified, the callback will be | 
					
						
						|  | called at every step. | 
					
						
						|  | cross_attention_kwargs (`dict`, *optional*): | 
					
						
|  | A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under | 
					
						
						|  | `self.processor` in | 
					
						
						|  | [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). | 
					
						
						|  |  | 
					
						
						|  | Returns: | 
					
						
|  | `None` if cancelled by `is_cancelled_callback`; otherwise, a | 
|  | [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is `True`, or a plain | 
|  | `tuple` if `return_dict` is `False`. | 
					
						
						|  | When returning a tuple, the first element is a list with the generated images, and the second element is a | 
					
						
						|  | list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" | 
					
						
						|  | (nsfw) content, according to the `safety_checker`. | 
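|  |  | 
|  | Examples: | 
|  | A minimal sketch, assuming `pipe` was loaded as in `__call__`; the file name is illustrative | 
|  | only. | 
|  |  | 
|  | ```py | 
|  | >>> init_image = PIL.Image.open("sketch.png").convert("RGB").resize((512, 512)) | 
|  | >>> image = pipe.img2img(image=init_image, prompt="a (detailed:1.2) oil painting", strength=0.7).images[0] | 
|  | ``` | 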
					
						
						|  | """ | 
					
						
						|  | return self.__call__( | 
					
						
						|  | prompt=prompt, | 
					
						
						|  | negative_prompt=negative_prompt, | 
					
						
						|  | image=image, | 
					
						
						|  | num_inference_steps=num_inference_steps, | 
					
						
						|  | guidance_scale=guidance_scale, | 
					
						
						|  | strength=strength, | 
					
						
						|  | num_images_per_prompt=num_images_per_prompt, | 
					
						
						|  | eta=eta, | 
					
						
						|  | generator=generator, | 
					
						
						|  | prompt_embeds=prompt_embeds, | 
					
						
						|  | negative_prompt_embeds=negative_prompt_embeds, | 
					
						
						|  | max_embeddings_multiples=max_embeddings_multiples, | 
					
						
						|  | output_type=output_type, | 
					
						
						|  | return_dict=return_dict, | 
					
						
						|  | callback=callback, | 
					
						
						|  | is_cancelled_callback=is_cancelled_callback, | 
					
						
						|  | callback_steps=callback_steps, | 
					
						
						|  | cross_attention_kwargs=cross_attention_kwargs, | 
					
						
						|  | ) | 
					
						
						|  |  | 
					
						
						|  | def inpaint( | 
					
						
						|  | self, | 
					
						
						|  | image: Union[torch.Tensor, PIL.Image.Image], | 
					
						
						|  | mask_image: Union[torch.Tensor, PIL.Image.Image], | 
					
						
						|  | prompt: Union[str, List[str]], | 
					
						
						|  | negative_prompt: Optional[Union[str, List[str]]] = None, | 
					
						
						|  | strength: float = 0.8, | 
					
						
						|  | num_inference_steps: Optional[int] = 50, | 
					
						
						|  | guidance_scale: Optional[float] = 7.5, | 
					
						
						|  | num_images_per_prompt: Optional[int] = 1, | 
					
						
						|  | add_predicted_noise: Optional[bool] = False, | 
					
						
						|  | eta: Optional[float] = 0.0, | 
					
						
						|  | generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, | 
					
						
						|  | prompt_embeds: Optional[torch.Tensor] = None, | 
					
						
						|  | negative_prompt_embeds: Optional[torch.Tensor] = None, | 
					
						
						|  | max_embeddings_multiples: Optional[int] = 3, | 
					
						
						|  | output_type: Optional[str] = "pil", | 
					
						
						|  | return_dict: bool = True, | 
					
						
						|  | callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, | 
					
						
						|  | is_cancelled_callback: Optional[Callable[[], bool]] = None, | 
					
						
						|  | callback_steps: int = 1, | 
					
						
						|  | cross_attention_kwargs: Optional[Dict[str, Any]] = None, | 
					
						
						|  | ): | 
					
						
						|  | r""" | 
					
						
|  | Function for inpainting. | 
					
						
						|  | Args: | 
					
						
						|  | image (`torch.Tensor` or `PIL.Image.Image`): | 
					
						
						|  | `Image`, or tensor representing an image batch, that will be used as the starting point for the | 
					
						
						|  | process. This is the image whose masked region will be inpainted. | 
					
						
						|  | mask_image (`torch.Tensor` or `PIL.Image.Image`): | 
					
						
						|  | `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be | 
					
						
						|  | replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a | 
					
						
						|  | PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should | 
					
						
						|  | contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. | 
					
						
						|  | prompt (`str` or `List[str]`): | 
					
						
						|  | The prompt or prompts to guide the image generation. | 
					
						
						|  | negative_prompt (`str` or `List[str]`, *optional*): | 
					
						
						|  | The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored | 
					
						
						|  | if `guidance_scale` is less than `1`). | 
					
						
						|  | strength (`float`, *optional*, defaults to 0.8): | 
					
						
						|  | Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength` | 
					
						
						|  | is 1, the denoising process will be run on the masked area for the full number of iterations specified | 
					
						
						|  | in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more | 
					
						
						|  | noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur. | 
					
						
						|  | num_inference_steps (`int`, *optional*, defaults to 50): | 
					
						
						|  | The reference number of denoising steps. More denoising steps usually lead to a higher quality image at | 
					
						
						|  | the expense of slower inference. This parameter will be modulated by `strength`, as explained above. | 
					
						
						|  | guidance_scale (`float`, *optional*, defaults to 7.5): | 
					
						
						|  | Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). | 
					
						
|  | `guidance_scale` is defined as `w` of equation 2 of the [Imagen | 
					
						
						|  | Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > | 
					
						
|  | 1`. Higher guidance scale encourages the model to generate images closely linked to the text `prompt`, | 
					
						
						|  | usually at the expense of lower image quality. | 
					
						
						|  | num_images_per_prompt (`int`, *optional*, defaults to 1): | 
					
						
						|  | The number of images to generate per prompt. | 
					
						
|  | add_predicted_noise (`bool`, *optional*, defaults to `False`): | 
					
						
						|  | Use predicted noise instead of random noise when constructing noisy versions of the original image in | 
					
						
|  | the reverse diffusion process. | 
					
						
						|  | eta (`float`, *optional*, defaults to 0.0): | 
					
						
						|  | Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to | 
					
						
|  | [`schedulers.DDIMScheduler`]; it is ignored for other schedulers. | 
					
						
						|  | generator (`torch.Generator` or `List[torch.Generator]`, *optional*): | 
					
						
						|  | One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) | 
					
						
						|  | to make generation deterministic. | 
					
						
						|  | prompt_embeds (`torch.Tensor`, *optional*): | 
					
						
						|  | Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not | 
					
						
|  | provided, text embeddings will be generated from the `prompt` input argument. | 
					
						
						|  | negative_prompt_embeds (`torch.Tensor`, *optional*): | 
					
						
						|  | Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt | 
					
						
|  | weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input | 
					
						
						|  | argument. | 
					
						
						|  | max_embeddings_multiples (`int`, *optional*, defaults to `3`): | 
					
						
|  | The maximum length of the prompt embeddings, expressed as a multiple of the text encoder's maximum output length. | 
					
						
						|  | output_type (`str`, *optional*, defaults to `"pil"`): | 
					
						
|  | The output format of the generated image. Choose between | 
					
						
						|  | [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. | 
					
						
						|  | return_dict (`bool`, *optional*, defaults to `True`): | 
					
						
						|  | Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a | 
					
						
						|  | plain tuple. | 
					
						
						|  | callback (`Callable`, *optional*): | 
					
						
						|  | A function that will be called every `callback_steps` steps during inference. The function will be | 
					
						
						|  | called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. | 
					
						
						|  | is_cancelled_callback (`Callable`, *optional*): | 
					
						
						|  | A function that will be called every `callback_steps` steps during inference. If the function returns | 
					
						
						|  | `True`, the inference will be cancelled. | 
					
						
						|  | callback_steps (`int`, *optional*, defaults to 1): | 
					
						
						|  | The frequency at which the `callback` function will be called. If not specified, the callback will be | 
					
						
						|  | called at every step. | 
					
						
						|  | cross_attention_kwargs (`dict`, *optional*): | 
					
						
|  | A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under | 
					
						
						|  | `self.processor` in | 
					
						
						|  | [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). | 
					
						
						|  |  | 
					
						
						|  | Returns: | 
					
						
|  | `None` if cancelled by `is_cancelled_callback`; otherwise, a | 
|  | [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is `True`, or a plain | 
|  | `tuple` if `return_dict` is `False`. | 
					
						
						|  | When returning a tuple, the first element is a list with the generated images, and the second element is a | 
					
						
						|  | list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" | 
					
						
						|  | (nsfw) content, according to the `safety_checker`. | 
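|  |  | 
|  | Examples: | 
|  | A minimal sketch, assuming `pipe` was loaded as in `__call__`; file names are illustrative | 
|  | only. White pixels of the mask are repainted, black pixels are kept. | 
|  |  | 
|  | ```py | 
|  | >>> init_image = PIL.Image.open("photo.png").convert("RGB").resize((512, 512)) | 
|  | >>> mask_image = PIL.Image.open("mask.png").convert("L").resize((512, 512)) | 
|  | >>> image = pipe.inpaint(image=init_image, mask_image=mask_image, prompt="a (cute:1.2) cat").images[0] | 
|  | ``` | 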
					
						
						|  | """ | 
					
						
						|  | return self.__call__( | 
					
						
						|  | prompt=prompt, | 
					
						
						|  | negative_prompt=negative_prompt, | 
					
						
						|  | image=image, | 
					
						
						|  | mask_image=mask_image, | 
					
						
						|  | num_inference_steps=num_inference_steps, | 
					
						
						|  | guidance_scale=guidance_scale, | 
					
						
						|  | strength=strength, | 
					
						
						|  | num_images_per_prompt=num_images_per_prompt, | 
					
						
						|  | add_predicted_noise=add_predicted_noise, | 
					
						
						|  | eta=eta, | 
					
						
						|  | generator=generator, | 
					
						
						|  | prompt_embeds=prompt_embeds, | 
					
						
						|  | negative_prompt_embeds=negative_prompt_embeds, | 
					
						
						|  | max_embeddings_multiples=max_embeddings_multiples, | 
					
						
						|  | output_type=output_type, | 
					
						
						|  | return_dict=return_dict, | 
					
						
						|  | callback=callback, | 
					
						
						|  | is_cancelled_callback=is_cancelled_callback, | 
					
						
						|  | callback_steps=callback_steps, | 
					
						
						|  | cross_attention_kwargs=cross_attention_kwargs, | 
					
						
						|  | ) | 
					
						
						|  |  |