Commit a464cec
Parent(s): 084a525

Update app_v1.py
app_v1.py CHANGED

@@ -23,13 +23,18 @@ os.environ['HF_HOME'] = cachedir
 local_folder = cachedir + "/model"
 
 quantized_model_dir = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"
-
+
+# Check if the model has already been downloaded
+model_path = os.path.join(local_folder, 'pytorch_model.bin')
+if not os.path.isfile(model_path):
+    snapshot_download(repo_id=quantized_model_dir, local_dir=local_folder, local_dir_use_symlinks=True)
 
 model_basename = cachedir + "/model/Jackson2-4bit-128g-GPTQ"
 
 use_strict = False
 use_triton = False
 
+# Load tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(local_folder, use_fast=False)
 
 quantize_config = BaseQuantizeConfig(
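
For context, the added lines implement a simple download-if-missing guard. Below is a minimal, self-contained sketch of that pattern, assuming snapshot_download comes from huggingface_hub and that cachedir is the HF_HOME cache directory set earlier in app_v1.py (the fallback default shown here is illustrative, not taken from the commit):

import os
from huggingface_hub import snapshot_download

# Illustrative stand-in for the cache dir configured at the top of app_v1.py.
cachedir = os.environ.get('HF_HOME', os.path.expanduser('~/.cache/huggingface'))
local_folder = cachedir + "/model"
quantized_model_dir = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"

# Only hit the Hub when the weights are not already on disk.
model_path = os.path.join(local_folder, 'pytorch_model.bin')
if not os.path.isfile(model_path):
    snapshot_download(repo_id=quantized_model_dir,
                      local_dir=local_folder,
                      local_dir_use_symlinks=True)

Note that the guard keys on pytorch_model.bin specifically; if the snapshot stores its weights under a different filename, the isfile check never passes and snapshot_download runs on every start (though it should still reuse already-cached files rather than re-downloading them).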
