	Rollback to cpu
sgm/modules/encoders/modules.py (CHANGED)
@@ -529,7 +529,7 @@ class FrozenOpenCLIPEmbedder2(AbstractEmbModel):
         assert layer in self.LAYERS
         model, _, _ = open_clip.create_model_and_transforms(
             arch,
-            device=torch.device("
+            device=torch.device("cpu"),
             pretrained=version if SDXL_CLIP2_CKPT_PTH is None else SDXL_CLIP2_CKPT_PTH,
         )
         del model.visual
@@ -628,7 +628,7 @@ class FrozenOpenCLIPEmbedder(AbstractEmbModel):
         super().__init__()
         assert layer in self.LAYERS
         model, _, _ = open_clip.create_model_and_transforms(
-            arch, device=torch.device("
+            arch, device=torch.device("cpu"), pretrained=version
         )
         del model.visual
         self.model = model
@@ -703,7 +703,7 @@ class FrozenOpenCLIPImageEmbedder(AbstractEmbModel):
         super().__init__()
         model, _, _ = open_clip.create_model_and_transforms(
             arch,
-            device=torch.device("
+            device=torch.device("cpu"),
             pretrained=version,
         )
         del model.transformer
@@ -1059,4 +1059,4 @@ class GaussianEncoder(Encoder, AbstractEmbModel):
         log["weight"] = self.weight
         if self.flatten_output:
             z = rearrange(z, "b c h w -> b (h w ) c")
-        return log, z
+        return log, z
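All three embedder hunks restore the same pattern: instantiate the OpenCLIP model on CPU at construction time, so the module can be built even when no GPU is visible (e.g. while the Space is starting or sleeping), and leave any move to an accelerator to the caller. Below is a minimal sketch of that pattern, not part of the commit itself; the "ViT-B-32" / "laion2b_s34b_b79k" names are placeholders, since the embedders above use their own arch/version defaults.

import open_clip
import torch

arch = "ViT-B-32"              # placeholder architecture name
version = "laion2b_s34b_b79k"  # placeholder pretrained tag

model, _, _ = open_clip.create_model_and_transforms(
    arch,
    device=torch.device("cpu"),  # always build on CPU, as in the diff
    pretrained=version,
)
del model.visual  # text-only embedder: drop the unused vision tower

# Relocate afterwards, and only if an accelerator actually exists.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

Creating on CPU first also avoids allocating GPU memory for submodules (like the vision tower deleted above) that are thrown away immediately after loading.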