vk committed
Commit d929284 · 1 Parent(s): 351cd89

PEFT model ID change

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -49,7 +49,7 @@ if __name__ == "__main__":
     tasks=["detect","extract handwritten_text","ocr","segment"]
     device = torch.device("cpu")
     # bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16) #for gpu
-    peft_model_id = "vk888/paligemma_vqav2"
+    peft_model_id = "vk888/paligemma2_vqav2_hw"
     model_id = "google/paligemma2-3b-pt-448"
     config = PeftConfig.from_pretrained(peft_model_id)
     base_model = PaliGemmaForConditionalGeneration.from_pretrained(config.base_model_name_or_path,
@@ -73,4 +73,4 @@ if __name__ == "__main__":
         title="DocVQA with Paligemma2 VLM",
         description="DocVQA with Paligemma2 VLM. Running on CPU .Each prompt can take 4-5 mins, better to clone & run locally. Thanks for your patience :) "
     )
-    iface.launch(share=True)
+    iface.launch(share=True)
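
For context, below is a minimal sketch of how the updated peft_model_id would typically be loaded onto the PaliGemma 2 base model with the peft library. The base_model line in the diff is truncated, so the float32 dtype, the PeftModel.from_pretrained call, and the AutoProcessor setup are assumptions about the rest of app.py rather than a verbatim copy of it.

import torch
from peft import PeftConfig, PeftModel
from transformers import AutoProcessor, PaliGemmaForConditionalGeneration

# Adapter repo updated by this commit, and the base checkpoint named in the diff.
peft_model_id = "vk888/paligemma2_vqav2_hw"
model_id = "google/paligemma2-3b-pt-448"

device = torch.device("cpu")

# Read the adapter config to find the base checkpoint it was trained against.
config = PeftConfig.from_pretrained(peft_model_id)

# Load the base PaliGemma 2 model on CPU; the commented-out BitsAndBytesConfig
# in the diff would only apply when running on GPU.
base_model = PaliGemmaForConditionalGeneration.from_pretrained(
    config.base_model_name_or_path,
    torch_dtype=torch.float32,  # assumed dtype; the truncated line may differ
).to(device)

# Attach the fine-tuned PEFT adapter weights on top of the base model (assumed
# loading path; app.py may wire this up differently).
model = PeftModel.from_pretrained(base_model, peft_model_id)
model.eval()

# Processor for preparing image + prompt inputs before generation.
processor = AutoProcessor.from_pretrained(model_id)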