|
23 | 23 | title: Accelerate inference of text-to-image diffusion models |
24 | 24 | title: Tutorials |
25 | 25 | - sections: |
| 26 | + - local: using-diffusers/loading |
| 27 | + title: Load pipelines |
| 28 | + - local: using-diffusers/custom_pipeline_overview |
| 29 | + title: Load community pipelines and components |
| 30 | + - local: using-diffusers/schedulers |
| 31 | + title: Load schedulers and models |
| 32 | + - local: using-diffusers/using_safetensors |
| 33 | + title: Load safetensors |
| 34 | + - local: using-diffusers/other-formats |
| 35 | + title: Load different Stable Diffusion formats |
| 36 | + - local: using-diffusers/loading_adapters |
| 37 | + title: Load adapters |
| 38 | + - local: using-diffusers/push_to_hub |
| 39 | + title: Push files to the Hub |
| 40 | + title: Load pipelines and adapters |
| 41 | +- sections: |
| 42 | + - local: using-diffusers/unconditional_image_generation |
| 43 | + title: Unconditional image generation |
| 44 | + - local: using-diffusers/conditional_image_generation |
| 45 | + title: Text-to-image |
| 46 | + - local: using-diffusers/img2img |
| 47 | + title: Image-to-image |
| 48 | + - local: using-diffusers/inpaint |
| 49 | + title: Inpainting |
| 50 | + - local: using-diffusers/text-img2vid |
| 51 | + title: Text or image-to-video |
| 52 | + - local: using-diffusers/depth2img |
| 53 | + title: Depth-to-image |
| 54 | + title: Generative tasks |
| 55 | +- sections: |
| 56 | + - local: using-diffusers/overview_techniques |
| 57 | + title: Overview |
| 58 | + - local: training/distributed_inference |
| 59 | + title: Distributed inference with multiple GPUs |
| 60 | + - local: using-diffusers/merge_loras |
| 61 | + title: Merge LoRAs |
| 62 | + - local: using-diffusers/callback |
| 63 | + title: Pipeline callbacks |
| 64 | + - local: using-diffusers/reusing_seeds |
| 65 | + title: Improve image quality with deterministic generation |
| 66 | + - local: using-diffusers/control_brightness |
| 67 | + title: Control image brightness |
| 68 | + - local: using-diffusers/weighted_prompts |
| 69 | + title: Prompt techniques |
| 70 | + - local: using-diffusers/freeu |
| 71 | + title: Improve generation quality with FreeU |
| 72 | + title: Inference techniques |
| 73 | +- sections: |
| 74 | + - local: using-diffusers/sdxl |
| 75 | + title: Stable Diffusion XL |
| 76 | + - local: using-diffusers/sdxl_turbo |
| 77 | + title: SDXL Turbo |
| 78 | + - local: using-diffusers/kandinsky |
| 79 | + title: Kandinsky |
| 80 | + - local: using-diffusers/ip_adapter |
| 81 | + title: IP-Adapter |
| 82 | + - local: using-diffusers/controlnet |
| 83 | + title: ControlNet |
| 84 | + - local: using-diffusers/t2i_adapter |
| 85 | + title: T2I-Adapter |
| 86 | + - local: using-diffusers/textual_inversion_inference |
| 87 | + title: Textual inversion |
| 88 | + - local: using-diffusers/shap-e |
| 89 | + title: Shap-E |
| 90 | + - local: using-diffusers/diffedit |
| 91 | + title: DiffEdit |
| 92 | + - local: using-diffusers/reproducibility |
| 93 | + title: Create reproducible pipelines |
| 94 | + - local: using-diffusers/custom_pipeline_examples |
| 95 | + title: Community pipelines |
| 96 | + - local: using-diffusers/contribute_pipeline |
| 97 | + title: Contribute a community pipeline |
| 98 | + - local: using-diffusers/inference_with_lcm_lora |
| 99 | + title: Latent Consistency Model-LoRA |
| 100 | + - local: using-diffusers/inference_with_lcm |
| 101 | + title: Latent Consistency Model |
| 102 | + - local: using-diffusers/inference_with_tcd_lora |
| 103 | + title: Trajectory Consistency Distillation-LoRA |
| 104 | + - local: using-diffusers/svd |
| 105 | + title: Stable Video Diffusion |
| 106 | + title: Specific pipeline examples |
| 107 | +- sections: |
| 108 | + - local: training/overview |
| 109 | + title: Overview |
| 110 | + - local: training/create_dataset |
| 111 | + title: Create a dataset for training |
| 112 | + - local: training/adapt_a_model |
| 113 | + title: Adapt a model to a new task |
26 | 114 | - sections: |
27 | | - - local: using-diffusers/loading |
28 | | - title: Load pipelines |
29 | | - - local: using-diffusers/custom_pipeline_overview |
30 | | - title: Load community pipelines and components |
31 | | - - local: using-diffusers/schedulers |
32 | | - title: Load schedulers and models |
33 | | - - local: using-diffusers/using_safetensors |
34 | | - title: Load safetensors |
35 | | - - local: using-diffusers/other-formats |
36 | | - title: Load different Stable Diffusion formats |
37 | | - - local: using-diffusers/loading_adapters |
38 | | - title: Load adapters |
39 | | - - local: using-diffusers/push_to_hub |
40 | | - title: Push files to the Hub |
41 | | - title: Loading & Hub |
42 | | - - sections: |
43 | | - - local: using-diffusers/pipeline_overview |
44 | | - title: Overview |
45 | | - - local: using-diffusers/unconditional_image_generation |
| 115 | + - local: training/unconditional_training |
46 | 116 | title: Unconditional image generation |
47 | | - - local: using-diffusers/conditional_image_generation |
| 117 | + - local: training/text2image |
48 | 118 | title: Text-to-image |
49 | | - - local: using-diffusers/img2img |
50 | | - title: Image-to-image |
51 | | - - local: using-diffusers/inpaint |
52 | | - title: Inpainting |
53 | | - - local: using-diffusers/text-img2vid |
54 | | - title: Text or image-to-video |
55 | | - - local: using-diffusers/depth2img |
56 | | - title: Depth-to-image |
57 | | - title: Tasks |
58 | | - - sections: |
59 | | - - local: using-diffusers/textual_inversion_inference |
60 | | - title: Textual inversion |
61 | | - - local: using-diffusers/ip_adapter |
62 | | - title: IP-Adapter |
63 | | - - local: using-diffusers/merge_loras |
64 | | - title: Merge LoRAs |
65 | | - - local: training/distributed_inference |
66 | | - title: Distributed inference with multiple GPUs |
67 | | - - local: using-diffusers/reusing_seeds |
68 | | - title: Improve image quality with deterministic generation |
69 | | - - local: using-diffusers/control_brightness |
70 | | - title: Control image brightness |
71 | | - - local: using-diffusers/weighted_prompts |
72 | | - title: Prompt techniques |
73 | | - - local: using-diffusers/freeu |
74 | | - title: Improve generation quality with FreeU |
75 | | - title: Techniques |
76 | | - - sections: |
77 | | - - local: using-diffusers/pipeline_overview |
78 | | - title: Overview |
79 | | - - local: using-diffusers/sdxl |
| 119 | + - local: training/sdxl |
80 | 120 | title: Stable Diffusion XL |
81 | | - - local: using-diffusers/sdxl_turbo |
82 | | - title: SDXL Turbo |
83 | | - - local: using-diffusers/kandinsky |
84 | | - title: Kandinsky |
85 | | - - local: using-diffusers/controlnet |
| 121 | + - local: training/kandinsky |
| 122 | + title: Kandinsky 2.2 |
| 123 | + - local: training/wuerstchen |
| 124 | + title: Wuerstchen |
| 125 | + - local: training/controlnet |
86 | 126 | title: ControlNet |
87 | | - - local: using-diffusers/t2i_adapter |
88 | | - title: T2I-Adapter |
89 | | - - local: using-diffusers/shap-e |
90 | | - title: Shap-E |
91 | | - - local: using-diffusers/diffedit |
92 | | - title: DiffEdit |
93 | | - - local: using-diffusers/distilled_sd |
94 | | - title: Distilled Stable Diffusion inference |
95 | | - - local: using-diffusers/callback |
96 | | - title: Pipeline callbacks |
97 | | - - local: using-diffusers/reproducibility |
98 | | - title: Create reproducible pipelines |
99 | | - - local: using-diffusers/custom_pipeline_examples |
100 | | - title: Community pipelines |
101 | | - - local: using-diffusers/contribute_pipeline |
102 | | - title: Contribute a community pipeline |
103 | | - - local: using-diffusers/inference_with_lcm_lora |
104 | | - title: Latent Consistency Model-LoRA |
105 | | - - local: using-diffusers/inference_with_lcm |
106 | | - title: Latent Consistency Model |
107 | | - - local: using-diffusers/inference_with_tcd_lora |
108 | | - title: Trajectory Consistency Distillation-LoRA |
109 | | - - local: using-diffusers/svd |
110 | | - title: Stable Video Diffusion |
111 | | - title: Specific pipeline examples |
112 | | - - sections: |
113 | | - - local: training/overview |
114 | | - title: Overview |
115 | | - - local: training/create_dataset |
116 | | - title: Create a dataset for training |
117 | | - - local: training/adapt_a_model |
118 | | - title: Adapt a model to a new task |
119 | | - - sections: |
120 | | - - local: training/unconditional_training |
121 | | - title: Unconditional image generation |
122 | | - - local: training/text2image |
123 | | - title: Text-to-image |
124 | | - - local: training/sdxl |
125 | | - title: Stable Diffusion XL |
126 | | - - local: training/kandinsky |
127 | | - title: Kandinsky 2.2 |
128 | | - - local: training/wuerstchen |
129 | | - title: Wuerstchen |
130 | | - - local: training/controlnet |
131 | | - title: ControlNet |
132 | | - - local: training/t2i_adapters |
133 | | - title: T2I-Adapters |
134 | | - - local: training/instructpix2pix |
135 | | - title: InstructPix2Pix |
136 | | - title: Models |
137 | | - - sections: |
138 | | - - local: training/text_inversion |
139 | | - title: Textual Inversion |
140 | | - - local: training/dreambooth |
141 | | - title: DreamBooth |
142 | | - - local: training/lora |
143 | | - title: LoRA |
144 | | - - local: training/custom_diffusion |
145 | | - title: Custom Diffusion |
146 | | - - local: training/lcm_distill |
147 | | - title: Latent Consistency Distillation |
148 | | - - local: training/ddpo |
149 | | - title: Reinforcement learning training with DDPO |
150 | | - title: Methods |
151 | | - title: Training |
| 127 | + - local: training/t2i_adapters |
| 128 | + title: T2I-Adapters |
| 129 | + - local: training/instructpix2pix |
| 130 | + title: InstructPix2Pix |
| 131 | + title: Models |
| 132 | + isExpanded: false |
152 | 133 | - sections: |
153 | | - - local: using-diffusers/other-modalities |
154 | | - title: Other Modalities |
155 | | - title: Taking Diffusers Beyond Images |
156 | | - title: Using Diffusers |
| 134 | + - local: training/text_inversion |
| 135 | + title: Textual Inversion |
| 136 | + - local: training/dreambooth |
| 137 | + title: DreamBooth |
| 138 | + - local: training/lora |
| 139 | + title: LoRA |
| 140 | + - local: training/custom_diffusion |
| 141 | + title: Custom Diffusion |
| 142 | + - local: training/lcm_distill |
| 143 | + title: Latent Consistency Distillation |
| 144 | + - local: training/ddpo |
| 145 | + title: Reinforcement learning training with DDPO |
| 146 | + title: Methods |
| 147 | + isExpanded: false |
| 148 | + title: Training |
157 | 149 | - sections: |
158 | | - - local: optimization/opt_overview |
159 | | - title: Overview |
160 | | - - sections: |
161 | | - - local: optimization/fp16 |
162 | | - title: Speed up inference |
163 | | - - local: optimization/memory |
164 | | - title: Reduce memory usage |
165 | | - - local: optimization/torch2.0 |
166 | | - title: PyTorch 2.0 |
167 | | - - local: optimization/xformers |
168 | | - title: xFormers |
169 | | - - local: optimization/tome |
170 | | - title: Token merging |
171 | | - - local: optimization/deepcache |
172 | | - title: DeepCache |
173 | | - - local: optimization/tgate |
174 | | - title: TGATE |
175 | | - title: General optimizations |
| 150 | + - local: optimization/fp16 |
| 151 | + title: Speed up inference |
| 152 | + - local: using-diffusers/distilled_sd |
| 153 | + title: Distilled Stable Diffusion inference |
| 154 | + - local: optimization/memory |
| 155 | + title: Reduce memory usage |
| 156 | + - local: optimization/torch2.0 |
| 157 | + title: PyTorch 2.0 |
| 158 | + - local: optimization/xformers |
| 159 | + title: xFormers |
| 160 | + - local: optimization/tome |
| 161 | + title: Token merging |
| 162 | + - local: optimization/deepcache |
| 163 | + title: DeepCache |
| 164 | + - local: optimization/tgate |
| 165 | + title: TGATE |
176 | 166 | - sections: |
177 | 167 | - local: using-diffusers/stable_diffusion_jax_how_to |
178 | 168 | title: JAX/Flax |
|
182 | 172 | title: OpenVINO |
183 | 173 | - local: optimization/coreml |
184 | 174 | title: Core ML |
185 | | - title: Optimized model types |
| 175 | + title: Optimized model formats |
186 | 176 | - sections: |
187 | 177 | - local: optimization/mps |
188 | 178 | title: Metal Performance Shaders (MPS) |
189 | 179 | - local: optimization/habana |
190 | 180 | title: Habana Gaudi |
191 | 181 | title: Optimized hardware |
192 | | - title: Optimization |
| 182 | + title: Accelerate inference and reduce memory |
193 | 183 | - sections: |
194 | 184 | - local: conceptual/philosophy |
195 | 185 | title: Philosophy |
|
211 | 201 | - local: api/outputs |
212 | 202 | title: Outputs |
213 | 203 | title: Main Classes |
| 204 | + isExpanded: false |
214 | 205 | - sections: |
215 | 206 | - local: api/loaders/ip_adapter |
216 | 207 | title: IP-Adapter |
|
225 | 216 | - local: api/loaders/peft |
226 | 217 | title: PEFT |
227 | 218 | title: Loaders |
| 219 | + isExpanded: false |
228 | 220 | - sections: |
229 | 221 | - local: api/models/overview |
230 | 222 | title: Overview |
|
259 | 251 | - local: api/models/controlnet |
260 | 252 | title: ControlNet |
261 | 253 | title: Models |
| 254 | + isExpanded: false |
262 | 255 | - sections: |
263 | 256 | - local: api/pipelines/overview |
264 | 257 | title: Overview |
|
383 | 376 | - local: api/pipelines/wuerstchen |
384 | 377 | title: Wuerstchen |
385 | 378 | title: Pipelines |
| 379 | + isExpanded: false |
386 | 380 | - sections: |
387 | 381 | - local: api/schedulers/overview |
388 | 382 | title: Overview |
|
443 | 437 | - local: api/schedulers/vq_diffusion |
444 | 438 | title: VQDiffusionScheduler |
445 | 439 | title: Schedulers |
| 440 | + isExpanded: false |
446 | 441 | - sections: |
447 | 442 | - local: api/internal_classes_overview |
448 | 443 | title: Overview |
|
457 | 452 | - local: api/image_processor |
458 | 453 | title: VAE Image Processor |
459 | 454 | title: Internal classes |
| 455 | + isExpanded: false |
460 | 456 | title: API |
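
For reference, a minimal sketch of how one of the reorganized sections reads once this diff is applied. The entries are copied from the added lines above; the exact indentation is an assumption based on the usual doc-builder toctree layout, since the diff does not preserve it:

```yaml
# Collapsible subsections now carry an explicit isExpanded: false flag
# so they render collapsed by default in the docs sidebar, e.g.:
- sections:
  - local: training/text_inversion
    title: Textual Inversion
  - local: training/dreambooth
    title: DreamBooth
  # ... remaining Methods entries from the diff ...
  title: Methods
  isExpanded: false
```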