Ju214 committed on
Commit
19924d8
·
1 Parent(s): 955043f

update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -5
README.md CHANGED
@@ -30,8 +30,6 @@ base_model:
30
  - mistralai/Mistral-Small-3.1-24B-Instruct-2503
31
  pipeline_tag: image-text-to-text
32
  tags:
33
- - neuralmagic
34
- - redhat
35
  - llmcompressor
36
  - quantized
37
  - int4
@@ -56,7 +54,7 @@ tags:
56
  - **Out-of-scope:** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages not officially supported by the model.
57
  - **Release Date:** 04/15/2025
58
  - **Version:** 1.0
59
- - **Model Developers:** Red Hat (Neural Magic)
60
 
61
 
62
  ### Model Optimizations
@@ -77,13 +75,13 @@ This model can be deployed efficiently using the [vLLM](https://docs.vllm.ai/en/
77
  from vllm import LLM, SamplingParams
78
  from transformers import AutoProcessor
79
 
80
- model_id = "RedHatAI/Mistral-Small-3.1-24B-Instruct-2503-FP8-dynamic"
81
  number_gpus = 1
82
 
83
  sampling_params = SamplingParams(temperature=0.7, top_p=0.8, max_tokens=256)
84
  processor = AutoProcessor.from_pretrained(model_id)
85
 
86
- messages = [{"role": "user", "content": "Give me a short introduction to large language model."}]
87
 
88
  prompts = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
89
 
 
30
  - mistralai/Mistral-Small-3.1-24B-Instruct-2503
31
  pipeline_tag: image-text-to-text
32
  tags:
 
 
33
  - llmcompressor
34
  - quantized
35
  - int4
 
54
  - **Out-of-scope:** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages not officially supported by the model.
55
  - **Release Date:** 04/15/2025
56
  - **Version:** 1.0
57
+ - **Model Developers:** Ju214 (Neural Magic)
58
 
59
 
60
  ### Model Optimizations
 
75
  from vllm import LLM, SamplingParams
76
  from transformers import AutoProcessor
77
 
78
+ model_id = "Ju214/Mistral-Small-24B-3.1"
79
  number_gpus = 1
80
 
81
  sampling_params = SamplingParams(temperature=0.7, top_p=0.8, max_tokens=256)
82
  processor = AutoProcessor.from_pretrained(model_id)
83
 
84
+ messages = [{"role": "user", "content": "Give me an introduction to large language model."}]
85
 
86
  prompts = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
87