Image-Text-to-Text · sentence-transformers · Safetensors · Transformers · qwen2_vl · Qwen2-VL · conversational
marco committed (verified)
Commit 71c5ad0 · 1 Parent(s): 3974dfd

Update README.md

Files changed (1):
  1. README.md (+4 -4)
README.md CHANGED
@@ -52,7 +52,7 @@ pip install -U llama-index-embeddings-huggingface
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding

 model = HuggingFaceEmbedding(
-    model_name="llamaindex/vdr-2b-v1",
+    model_name="llamaindex/vdr-2b-multi-v1",
     device="cpu",  # "mps" for mac, "cuda" for nvidia GPUs
     trust_remote_code=True,
 )
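For context, a minimal usage sketch of the renamed checkpoint through LlamaIndex (not part of the diff): `get_query_embedding` and `get_text_embedding` are the standard `HuggingFaceEmbedding` methods, and the example strings are placeholders.

```python
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

model = HuggingFaceEmbedding(
    model_name="llamaindex/vdr-2b-multi-v1",
    device="cpu",  # "mps" for mac, "cuda" for nvidia GPUs
    trust_remote_code=True,
)

# Embed a search query and a candidate passage (placeholder texts).
query_embedding = model.get_query_embedding("What is the tallest mountain in Europe?")
doc_embedding = model.get_text_embedding("Mont Blanc is the highest peak in the Alps.")
print(len(query_embedding), len(doc_embedding))  # both vectors have the model's embedding size
```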
@@ -81,7 +81,7 @@ min_pixels = 1 * 28 * 28

 # Load the embedding model and processor
 model = Qwen2VLForConditionalGeneration.from_pretrained(
-    'llamaindex/vdr-2b-v1',
+    'llamaindex/vdr-2b-multi-v1',
     # These are the recommended kwargs for the model, but change them as needed
     attn_implementation="flash_attention_2",
     torch_dtype=torch.bfloat16,
@@ -89,7 +89,7 @@ model = Qwen2VLForConditionalGeneration.from_pretrained(
 ).eval()

 processor = AutoProcessor.from_pretrained(
-    'llamaindex/vdr-2b-v1',
+    'llamaindex/vdr-2b-multi-v1',
     min_pixels=min_pixels,
     max_pixels=max_pixels
 )
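As a rough illustration of what the loaded model and processor are used for, the sketch below embeds a text query by taking the last-token hidden state and L2-normalizing it. The prompt template and pooling actually recommended for `vdr-2b-multi-v1` are defined in the README itself; last-token pooling and the placeholder query here are assumptions for illustration only, continuing from the `model` and `processor` loaded above.

```python
import torch

query = "What is the tallest mountain in Europe?"  # placeholder text
inputs = processor(text=[query], padding=True, return_tensors="pt").to(model.device)

with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)

# Assumed pooling: last-token hidden state of the final layer, L2-normalized.
embedding = torch.nn.functional.normalize(outputs.hidden_states[-1][:, -1], p=2, dim=-1)
print(embedding.shape)
```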
@@ -217,7 +217,7 @@ via SentenceTransformers
 from sentence_transformers import SentenceTransformer

 model = SentenceTransformer(
-    model_name_or_path="llamaindex/vdr-2b-v1",
+    model_name_or_path="llamaindex/vdr-2b-multi-v1",
     device="cuda",
     trust_remote_code=True,
     # These are the recommended kwargs for the model, but change them as needed if you don't have CUDA
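And a short end-to-end sketch for the SentenceTransformers path, continuing from the `model` constructed above (assumptions: `model.similarity` from sentence-transformers >= 3.0, placeholder texts, and no task-specific prompts, which the README may define separately).

```python
# Encode a query and a document, then score them with cosine similarity.
query_emb = model.encode(["What is the tallest mountain in Europe?"])
doc_emb = model.encode(["Mont Blanc is the highest peak in the Alps."])

scores = model.similarity(query_emb, doc_emb)  # cosine similarity by default
print(scores)
```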
 