daekeun-ml commited on
Commit
ec82e1d
·
verified · 
1 Parent(s): 1718362

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -3
README.md CHANGED
@@ -117,10 +117,9 @@ from datasets import load_dataset
117
  from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
118
 
119
  max_new_tokens = 256
120
- orig_model_path = "microsoft/Phi-4-multimodal-instruct"
121
  ft_model_path = "daekeun-ml/Phi-4-multimodal-finetune-ko-speech"
122
  generation_config = GenerationConfig.from_pretrained(ft_model_path, 'generation_config.json')
123
- processor = AutoProcessor.from_pretrained(orig_model_path, trust_remote_code=True)
124
  model = AutoModelForCausalLM.from_pretrained(
125
  ft_model_path,
126
  trust_remote_code=True,
@@ -154,7 +153,7 @@ generate_ids = generate_ids[:, inputs['input_ids'].shape[1] :]
154
  response = processor.batch_decode(
155
  generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
156
  )[0]
157
- print(response) # "몬토 킬은 자녀들이 사랑을 제대로 못 받고 크면 매우 심각한 결과가 초래된다는 결론을 내렸습니다"
158
  ```
159
 
160
  ### Demos
 
117
  from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
118
 
119
  max_new_tokens = 256
 
120
  ft_model_path = "daekeun-ml/Phi-4-multimodal-finetune-ko-speech"
121
  generation_config = GenerationConfig.from_pretrained(ft_model_path, 'generation_config.json')
122
+ processor = AutoProcessor.from_pretrained(ft_model_path, trust_remote_code=True)
123
  model = AutoModelForCausalLM.from_pretrained(
124
  ft_model_path,
125
  trust_remote_code=True,
 
153
  response = processor.batch_decode(
154
  generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
155
  )[0]
156
+ print(response) # "몬터규는 자녀들이 사랑을 제대로 못 받고 크면 매우 심각한 결과가 초래된다는 결론을 내렸습니다"
157
  ```
158
 
159
  ### Demos