manelhalima committed
Commit ff782aa · verified · 1 Parent(s): 5158ce7

Update app.py

Image conversion

Files changed (1):
  1. app.py +15 -0
app.py CHANGED
@@ -2,9 +2,22 @@ import gradio as gr
 from transformers import LlavaForConditionalGeneration, LlavaProcessor
 from PIL import Image
 import torch
+import io
 import warnings
 warnings.filterwarnings("ignore")
 
+
+def safe_convert_image(img):
+    if isinstance(img, Image.Image):
+        return img
+    elif isinstance(img, bytes):
+        return Image.open(io.BytesIO(img))
+    elif hasattr(img, "read"):  # Gradio file object case
+        return Image.open(img)
+    else:
+        raise ValueError("Unsupported image format.")
+
+
 #Load the model and the processor
 model_id = "llava-hf/llava-1.5-7b-hf"
 #processor = LlavaProcessor.from_pretrained(model_id)
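
For reference, a minimal usage sketch of the new helper (not part of the commit). It assumes safe_convert_image from app.py is already defined in the current scope and uses an in-memory PNG as a stand-in for a Gradio upload:

# Assumes safe_convert_image from app.py is already in scope.
import io
from PIL import Image

# 1) An already-decoded PIL image is returned unchanged.
pil_img = Image.new("RGB", (64, 64), color="white")
assert safe_convert_image(pil_img) is pil_img

# 2) Raw bytes (e.g. an upload read into memory) are decoded through an in-memory buffer.
buf = io.BytesIO()
pil_img.save(buf, format="PNG")
from_bytes = safe_convert_image(buf.getvalue())

# 3) A file-like object exposing .read() (the Gradio file case) is opened directly with PIL.
buf.seek(0)
from_file = safe_convert_image(buf)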
 
@@ -20,6 +33,8 @@ model = LlavaForConditionalGeneration.from_pretrained(
 def vqa_llava(image, question):
     if not isinstance(image, Image.Image):
         image = Image.open(image)
+        image = safe_convert_image(image)
+
 
     prompt = f"[INST] {question} [/INST]"
     inputs = processor(prompt, image, return_tensors="pt").to(model.device)
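
The diff is truncated at the processor call; for context, below is a hedged sketch of how the rest of vqa_llava and the Gradio wiring typically look. The single safe_convert_image call replacing the isinstance check, the generation budget (max_new_tokens=128), and the interface labels are assumptions for illustration, not values taken from the commit; processor, model, and safe_convert_image are assumed to be defined earlier in app.py:

import gradio as gr
import torch

def vqa_llava(image, question):
    # Simplified input handling: normalize whatever Gradio passes in
    # (PIL image, bytes, or file-like object) with the new helper.
    image = safe_convert_image(image)
    # Prompt format as used in the commit.
    prompt = f"[INST] {question} [/INST]"
    inputs = processor(prompt, image, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=128)  # assumed token budget
    return processor.decode(output_ids[0], skip_special_tokens=True)

# Typical Gradio wiring for a VQA demo (labels are placeholders).
demo = gr.Interface(
    fn=vqa_llava,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Answer"),
)
demo.launch()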