Commit · c5d43bc
1 Parent(s): 941d256

finish
Browse files

- cd_bedroom256_lpips_onestep_sample.png +0 -0
- control_net_canny.py +2 -2
- run_better_trans_clip.py +14 -0
- run_consistency.py +18 -0
- run_lora +0 -1
- run_no_grad.py +22 -0
- run_safety.py +13 -0
- run_sd_xl.py +2 -0
- sd_2_1_API.py +17 -0
    	
cd_bedroom256_lpips_onestep_sample.png
ADDED
(binary image; no text diff)
    	
control_net_canny.py
CHANGED

@@ -33,14 +33,14 @@ canny_image = Image.fromarray(image)
 
 controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
 pipe = StableDiffusionControlNetPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+    "runwayml/stable-diffusion-v1-5", controlnet=[controlnet, controlnet], torch_dtype=torch.float16
 )
 
 pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
 pipe.enable_model_cpu_offload()
 
 generator = torch.manual_seed(33)
-out_image = pipe("a blue paradise bird in the jungle", num_inference_steps=20, generator=generator, image=canny_image).images[0]
+out_image = pipe("a blue paradise bird in the jungle", control_guidance_start=[0.2, 0.2], num_inference_steps=20, generator=generator, image=[canny_image, canny_image]).images[0]
 
 path = os.path.join(Path.home(), "images", "aa.png")
 out_image.save(path)
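Note: this edit switches the pipeline into Multi-ControlNet mode: controlnet now takes a list of models, image a matching list of conditioning images, and control_guidance_start=[0.2, 0.2] keeps both nets inactive for the first 20% of the denoising steps. A minimal sketch of the same idea with two distinct ControlNets and an explicit end point, reusing the imports and canny_image already defined in control_net_canny.py (the depth checkpoint and depth_image are assumptions for illustration, not part of this commit):

# Sketch (not part of this commit): two different ControlNets over different step ranges.
# "lllyasviel/sd-controlnet-depth" and depth_image are illustrative assumptions.
canny_net = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
depth_net = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=[canny_net, depth_net], torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
out_image = pipe(
    "a blue paradise bird in the jungle",
    image=[canny_image, depth_image],    # one conditioning image per ControlNet
    control_guidance_start=[0.2, 0.0],   # canny guidance only kicks in after 20% of the steps
    control_guidance_end=[1.0, 0.8],     # depth guidance stops at 80% of the steps
    num_inference_steps=20,
    generator=torch.manual_seed(33),
).images[0]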
    	
run_better_trans_clip.py
ADDED

@@ -0,0 +1,14 @@
+#!/usr/bin/env python3
+from transformers import CLIPTextConfig, CLIPModel
+import torch
+
+config = CLIPTextConfig.from_pretrained("openMUSE/CLIP-ViT-L-14-DataComp.XL-s13B-b90K-penultimate")
+
+model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K", text_config=config)
+
+model.to_bettertransformer()
+
+text_encoder = model.text_model
+text_encoder = torch.compile(text_encoder)
+
+
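Note: run_better_trans_clip.py compiles the CLIP text encoder but never calls it. A minimal sketch of actually encoding a prompt with the compiled module, reusing the names defined above (the tokenizer checkpoint is an assumption):

# Sketch (not part of this commit): run the compiled text encoder once.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K")  # assumed tokenizer repo
inputs = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt")
with torch.no_grad():
    hidden_states = text_encoder(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask).last_hidden_state
print(hidden_states.shape)  # (1, seq_len, 768) for CLIP-ViT-L-14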
    	
run_consistency.py
ADDED

@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+import torch
+
+from diffusers import ConsistencyModelPipeline, UNet2DModel
+
+device = "cpu"
+# Load the cd_bedroom256_lpips checkpoint.
+model_id_or_path = "openai/diffusers-cd_bedroom256_lpips"
+pipe = ConsistencyModelPipeline.from_pretrained(model_id_or_path)
+pipe.to(device)
+
+# Multistep sampling
+# Timesteps can be explicitly specified; the particular timesteps below are from the original Github repo:
+# https://github.com/openai/consistency_models/blob/main/scripts/launch.sh#L83
+for _ in range(10):
+    image = pipe(timesteps=[17, 0]).images[0]
+    image.show()
+
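Note: the commit also adds cd_bedroom256_lpips_onestep_sample.png. A minimal sketch of the corresponding one-step sampling with the same pipeline (reusing the output filename is an assumption):

# Sketch (not part of this commit): one-step sampling with the same consistency model.
image = pipe(num_inference_steps=1).images[0]
image.save("cd_bedroom256_lpips_onestep_sample.png")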
    	
run_lora
CHANGED

@@ -12,7 +12,6 @@ from PIL import Image
 from io import BytesIO
 
 # path = sys.argv[1]
-path = "runwayml/stable-diffusion-v1-5"
 path = "gsdf/Counterfeit-V2.5"
 # path = "stabilityai/stable-diffusion-2-1"
 
    	
run_no_grad.py
ADDED

@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+from transformers import BertModel
+import torch
+import time
+
+
+model = BertModel.from_pretrained("bert-base-uncased")
+model.to("cuda")
+
+input_ids = torch.ones((16, 256), dtype=torch.long)
+input_ids = input_ids.to("cuda")
+
+model.requires_grad_(False)
+
+start_time = time.time()
+
+for _ in range(5):
+    with torch.no_grad():
+        logits = model(input_ids)
+
+print(time.time() - start_time)
+
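Note: run_no_grad.py times five forward passes, but CUDA kernels launch asynchronously, so time.time() alone can under-report the work. A minimal sketch of a synchronized measurement, using torch.inference_mode() as the stricter alternative to torch.no_grad() (the timing pattern is a suggestion, not part of this commit):

# Sketch (not part of this commit): synchronize before reading the clock on GPU.
torch.cuda.synchronize()
start_time = time.time()
for _ in range(5):
    with torch.inference_mode():   # like no_grad, but also disables autograd view/version tracking
        logits = model(input_ids)
torch.cuda.synchronize()
print(time.time() - start_time)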
    	
run_safety.py
ADDED

@@ -0,0 +1,13 @@
+#!/usr/bin/env python3
+import PIL
+from transformers import CLIPImageProcessor
+from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
+
+feature_extractor = CLIPImageProcessor.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="feature_extractor")
+safety_checker = StableDiffusionSafetyChecker.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="safety_checker")
+device = "cpu"
+
+image = PIL.Image.open("/home/patrick/images/0.png")
+
+safety_checker_input = feature_extractor(image, return_tensors="pt").to(device)
+image, has_nsfw_concept = safety_checker(images=image, clip_input=safety_checker_input.pixel_values)
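Note: the safety checker returns the (possibly blanked-out) images together with a per-image NSFW flag. A small sketch of inspecting the verdict (the print is illustrative, not part of this commit):

# Sketch (not part of this commit): inspect the checker's verdict.
print(has_nsfw_concept)  # one boolean per input image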
    	
run_sd_xl.py
ADDED

@@ -0,0 +1,2 @@
+#!/usr/bin/env python3
+from diffusers import StableDiffusionXLPipeline
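Note: run_sd_xl.py only imports the pipeline so far. A minimal sketch of loading and running it (the checkpoint name, dtype, prompt, and output path are assumptions, not part of this commit):

# Sketch (not part of this commit): load and run an SD-XL checkpoint.
import torch

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16  # assumed checkpoint
)
pipe.to("cuda")
image = pipe("a blue paradise bird in the jungle", num_inference_steps=20).images[0]
image.save("sd_xl.png")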
    	
sd_2_1_API.py
ADDED

@@ -0,0 +1,17 @@
+#!/usr/bin/env python3
+import requests
+import io
+from PIL import Image
+
+API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"
+headers = {"Authorization": "Bearer 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"}
+
+def query(payload):
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.content
+image_bytes = query({
+    "inputs": "Astronaut riding a horse",
+})
+# You can access the image with PIL.Image for example
+image = Image.open(io.BytesIO(image_bytes))
+import ipdb; ipdb.set_trace()
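Note: when the hosted model is still loading or the request fails, the Inference API returns an error body instead of image bytes, and Image.open then fails with an unrelated-looking error. A small sketch of a more defensive query (the error handling is a suggestion, not part of this commit):

# Sketch (not part of this commit): surface API errors instead of failing inside PIL.
def query_checked(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    if response.status_code != 200:
        raise RuntimeError(f"Inference API error {response.status_code}: {response.text}")
    return response.content

image_bytes = query_checked({"inputs": "Astronaut riding a horse"})
image = Image.open(io.BytesIO(image_bytes))
image.save("astronaut.png")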

