Hon-Wong nielsr HF Staff committed on
Commit a4723d6 · verified · 1 Parent(s): 24b0f73

Add metadata (#1)

- Add metadata (6855994a03d4049370bc50f8cdd44b8d38d67629)


Co-authored-by: Niels Rogge <[email protected]>

Files changed (1)
  1. README.md +35 -29
README.md CHANGED
@@ -1,37 +1,43 @@
+ ---
+ library_name: transformers
+ pipeline_tag: image-text-to-text
+ ---
+
  # VoRA
+
  * [ArXiv Paper](https://arxiv.org/abs/2503.20680)
  * [Github](https://github.com/Hon-Wong/VoRA)

  ## Quickstart

  ```python
- import torch
- from transformers import AutoProcessor, AutoModelForCausalLM
- model_name = "Hon-Wong/VoRA-7B-Base"
- processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
- model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
- conversation = [
- {
- "role":"user",
- "content":[
- {
- "type":"image",
- "url": "{image path or url}"
- },
- {
- "type":"text",
- "text":"<image> Describe this image."
- }
- ]
- }
- ]
- model_inputs = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=True, return_tensors='pt', return_dict=True).to(model.device)
- gen_kwargs = {"max_new_tokens": 1024, "eos_token_id": processor.tokenizer.eos_token_id}
-
- with torch.inference_mode():
- outputs = model.generate(model_inputs, **gen_kwargs)
- output_text = processor.tokenizer.batch_decode(
- outputs, skip_special_tokens=True
- )
- print(output_text)
+ import torch
+ from transformers import AutoProcessor, AutoModelForCausalLM
+ model_name = "Hon-Wong/VoRA-7B-Base"
+ processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
+ model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
+ conversation = [
+ {
+ "role":"user",
+ "content":[
+ {
+ "type":"image",
+ "url": "{image path or url}"
+ },
+ {
+ "type":"text",
+ "text":"<image> Describe this image."
+ }
+ ]
+ }
+ ]
+ model_inputs = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=True, return_tensors='pt', return_dict=True).to(model.device)
+ gen_kwargs = {"max_new_tokens": 1024, "eos_token_id": processor.tokenizer.eos_token_id}
+
+ with torch.inference_mode():
+ outputs = model.generate(model_inputs, **gen_kwargs)
+ output_text = processor.tokenizer.batch_decode(
+ outputs, skip_special_tokens=True
+ )
+ print(output_text)
  ```
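
The front matter added in this commit is what the Hub parses into the model card's metadata, associating the repo with the `transformers` library and the `image-text-to-text` pipeline tag. A minimal sketch, assuming `huggingface_hub` is installed, of reading that metadata back after the commit:

```python
from huggingface_hub import ModelCard

# Load the model card for the repo and inspect the metadata this commit adds.
card = ModelCard.load("Hon-Wong/VoRA-7B-Base")
print(card.data.library_name)  # expected: "transformers"
print(card.data.pipeline_tag)  # expected: "image-text-to-text"
```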