Uploading model files
Files changed:
- README.md +4 -4
- adapter_config.json +11 -6
- adapter_model.safetensors +2 -2
- chat_template.jinja +1 -0
- tokenizer_config.json +0 -1
- training_args.bin +2 -2
 
    	
README.md CHANGED

@@ -31,7 +31,7 @@ More information needed
 ### Framework versions
 
 - PEFT 0.15.2
-- Transformers 4.
-- Pytorch 2.
-- Datasets 3.
-- Tokenizers 0.21.
+- Transformers 4.52.4
+- Pytorch 2.8.0+cu128
+- Datasets 3.6.0
+- Tokenizers 0.21.1
    	
adapter_config.json CHANGED

@@ -3,6 +3,9 @@
   "auto_mapping": null,
   "base_model_name_or_path": "microsoft/Phi-4-mini-instruct",
   "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
@@ -10,22 +13,24 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha":
-  "lora_dropout":
+  "lora_alpha": 64,
+  "lora_bias": false,
+  "lora_dropout": 0.1,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r":
+  "r": 32,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "gate_up_proj",
-    "qkv_proj",
     "down_proj",
-    "o_proj"
+    "o_proj",
+    "gate_up_proj",
+    "qkv_proj"
   ],
   "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
   "use_dora": false,
   "use_rslora": true
 }
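The updated adapter_config.json pins the LoRA hyperparameters (r=32, lora_alpha=64, lora_dropout=0.1, rsLoRA scaling) and targets the qkv_proj, o_proj, gate_up_proj, and down_proj projections of microsoft/Phi-4-mini-instruct. Below is a minimal sketch of how such an adapter is typically attached to its base model with PEFT; the adapter repo id is a placeholder, not the actual location of this upload.

# Minimal sketch: attach the uploaded LoRA adapter to its base model with PEFT.
# "your-username/phi4-mini-lora-adapter" is a placeholder repo id (assumption).
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "microsoft/Phi-4-mini-instruct"            # base_model_name_or_path above
adapter_id = "your-username/phi4-mini-lora-adapter"  # placeholder for this repo

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto", device_map="auto")

# PeftModel reads adapter_config.json (r=32, lora_alpha=64, rsLoRA scaling,
# target modules qkv_proj/o_proj/gate_up_proj/down_proj) and loads the
# low-rank weights from adapter_model.safetensors on top of the base model.
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()

If a standalone checkpoint is needed, model.merge_and_unload() would fold the LoRA deltas back into the base weights.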
    	
adapter_model.safetensors CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:96539534c4692df48882f749e1c5335b1bd176f08763ef5aca6957105edffb0f
+size 184584072
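adapter_model.safetensors is stored through Git LFS, so the repository itself only tracks the pointer above; the oid is the SHA-256 of the actual ~185 MB weight file. A small sketch for verifying a downloaded copy against that pointer (the local file path is an assumption):

# Sketch: check a downloaded adapter_model.safetensors against the LFS pointer.
# The local path below is an assumption; point it at the downloaded copy.
import hashlib

expected = "96539534c4692df48882f749e1c5335b1bd176f08763ef5aca6957105edffb0f"

digest = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert digest.hexdigest() == expected, "checksum mismatch with the LFS pointer"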
    	
chat_template.jinja ADDED

@@ -0,0 +1 @@
+{% for message in messages %}{% if message['role'] == 'system' and 'tools' in message and message['tools'] is not none %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|tool|>' + message['tools'] + '<|/tool|>' + '<|end|>' }}{% else %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|end|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>' }}{% else %}{{ eos_token }}{% endif %}
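This commit moves the Phi-4 chat template out of tokenizer_config.json and into a standalone chat_template.jinja, which newer Transformers versions read in place of the inline tokenizer_config.json field; the matching removal appears in the tokenizer_config.json diff below. A minimal sketch of rendering a prompt with the template, using an assumed example conversation:

# Sketch: render a prompt with the chat template; the messages are example data.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-4-mini-instruct")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Explain what a LoRA adapter is in one sentence."},
]

# add_generation_prompt=True appends '<|assistant|>' so the model continues
# with a reply instead of the template emitting eos_token.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)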
    	
tokenizer_config.json CHANGED

@@ -109,7 +109,6 @@
     }
   },
   "bos_token": "<|endoftext|>",
-  "chat_template": "{% for message in messages %}{% if message['role'] == 'system' and 'tools' in message and message['tools'] is not none %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|tool|>' + message['tools'] + '<|/tool|>' + '<|end|>' }}{% else %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|end|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>' }}{% else %}{{ eos_token }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "extra_special_tokens": {},
    	
training_args.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:91a7473d33db7326dfd33d711b8248f2d0ec8796c98b6989084de8fc4d60103a
+size 6097