@@ -79,7 +79,7 @@ To save more GPU memory and get even more speed, you can load and run the model
 ```Python
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
+
     torch_dtype=torch.float16,
 )
 pipe = pipe.to("cuda")
@@ -107,7 +107,7 @@ from diffusers import StableDiffusionPipeline
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
+
     torch_dtype=torch.float16,
 )
 pipe = pipe.to("cuda")
@@ -134,7 +134,7 @@ from diffusers import StableDiffusionPipeline
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
+
     torch_dtype=torch.float16,
 )
 pipe = pipe.to("cuda")
@@ -159,7 +159,7 @@ from diffusers import StableDiffusionPipeline
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
+
     torch_dtype=torch.float16,
 )
 pipe = pipe.to("cuda")
@@ -179,7 +179,7 @@ from diffusers import StableDiffusionPipeline
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
+
     torch_dtype=torch.float16,
 )
 pipe = pipe.to("cuda")
@@ -234,7 +234,6 @@ def generate_inputs():
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
     torch_dtype=torch.float16,
 ).to("cuda")
 unet = pipe.unet
@@ -298,7 +297,6 @@ class UNet2DConditionOutput:
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
     torch_dtype=torch.float16,
 ).to("cuda")
 
@@ -349,7 +347,6 @@ import torch
 
 pipe = StableDiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
-    revision="fp16",
     torch_dtype=torch.float16,
 ).to("cuda")
 
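
Every hunk in this commit makes the same change: the `revision="fp16"` argument is dropped from `StableDiffusionPipeline.from_pretrained(...)` while `torch_dtype=torch.float16` is kept as the way to run the pipeline in half precision. For reference, a minimal self-contained sketch of the loading pattern after this change (the prompt string is illustrative and not taken from the diff):

```Python
import torch
from diffusers import StableDiffusionPipeline

# After this commit, half precision is requested via torch_dtype alone;
# the separate revision="fp16" argument is no longer passed.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

# Illustrative usage (the prompt is an assumption, not part of the diff).
image = pipe("a photo of an astronaut riding a horse on mars").images[0]
```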