Update README.md
Browse files
README.md
CHANGED
@@ -117,10 +117,9 @@ from datasets import load_dataset
|
|
117 |
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
|
118 |
|
119 |
max_new_tokens = 256
|
120 |
-
orig_model_path = "microsoft/Phi-4-multimodal-instruct"
|
121 |
ft_model_path = "daekeun-ml/Phi-4-multimodal-finetune-ko-speech"
|
122 |
generation_config = GenerationConfig.from_pretrained(ft_model_path, 'generation_config.json')
|
123 |
-
processor = AutoProcessor.from_pretrained(
|
124 |
model = AutoModelForCausalLM.from_pretrained(
|
125 |
ft_model_path,
|
126 |
trust_remote_code=True,
|
@@ -154,7 +153,7 @@ generate_ids = generate_ids[:, inputs['input_ids'].shape[1] :]
|
|
154 |
response = processor.batch_decode(
|
155 |
generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
|
156 |
)[0]
|
157 |
-
print(response) # "
|
158 |
```
|
159 |
|
160 |
### Demos
|
|
|
117 |
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
|
118 |
|
119 |
max_new_tokens = 256
|
|
|
120 |
ft_model_path = "daekeun-ml/Phi-4-multimodal-finetune-ko-speech"
|
121 |
generation_config = GenerationConfig.from_pretrained(ft_model_path, 'generation_config.json')
|
122 |
+
processor = AutoProcessor.from_pretrained(ft_model_path, trust_remote_code=True)
|
123 |
model = AutoModelForCausalLM.from_pretrained(
|
124 |
ft_model_path,
|
125 |
trust_remote_code=True,
|
|
|
153 |
response = processor.batch_decode(
|
154 |
generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
|
155 |
)[0]
|
156 |
+
print(response) # "몬티규는 자녀들이 사랑을 제대로 못 받고 크면 매우 심각한 결과가 초래된다는 결론을 내렸습니다"
|
157 |
```
|
158 |
|
159 |
### Demos
|