awacke1 committed on
Commit c91a3e7 · verified · 1 Parent(s): 99c9d35

Update app.py

Files changed (1)
  1. app.py +16 -15
app.py CHANGED
@@ -2,23 +2,22 @@ import os
 import gradio as gr
 import torch
 import PIL
-
-from flamingo_mini import FlamingoConfig, FlamingoModel, FlamingoProcessor
-
-
 
 EXAMPLES_DIR = 'examples'
 DEFAULT_PROMPT = "<image>"
 
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
-model = FlamingoModel.from_pretrained('dhansmair/flamingo-mini')
+from transformers import AutoProcessor, AutoModelForCausalLM  # Using AutoModel classes
+# Load model using AutoModel with trust_remote_code=True
+model = AutoModelForCausalLM.from_pretrained('dhansmair/flamingo-mini', trust_remote_code=True)
 model.to(device)
 model.eval()
 
-processor = FlamingoProcessor(model.config, device=device)
+# Initialize processor without the `device` argument
+processor = AutoProcessor.from_pretrained('dhansmair/flamingo-mini')
 
-# setup some example images
+# Setup some example images
 examples = []
 if os.path.isdir(EXAMPLES_DIR):
     for file in os.listdir(EXAMPLES_DIR):
@@ -29,10 +28,10 @@ if os.path.isdir(EXAMPLES_DIR):
 def predict_caption(image, prompt):
     assert isinstance(prompt, str)
 
-    caption = model.generate_captions(
-        processor,
-        images=image,
-        prompt=prompt
+    # Process the image using the model
+    caption = model.generate(
+        processor(images=image, prompt=prompt),  # Pass processed inputs to the model
+        max_length=50
     )
 
     if isinstance(caption, list):
@@ -41,9 +40,11 @@ def predict_caption(image, prompt):
     return caption
 
 
-iface = gr.Interface(fn=predict_caption,
-                     inputs=[gr.Image(type="pil"), gr.Textbox(value=DEFAULT_PROMPT, label="Prompt")],
-                     examples=examples,
-                     outputs="text")
+iface = gr.Interface(
+    fn=predict_caption,
+    inputs=[gr.Image(type="pil"), gr.Textbox(value=DEFAULT_PROMPT, label="Prompt")],
+    examples=examples,
+    outputs="text"
+)
 
 iface.launch(debug=True)
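
Note: the committed predict_caption passes the raw processor output to model.generate as a single positional argument and returns the result without decoding it, so the Gradio output may be token ids rather than text. Below is a minimal sketch of the more common transformers captioning flow, assuming the flamingo-mini remote code exposes a standard processor interface; the text= keyword, return_tensors="pt", and the batch_decode step are assumptions, not confirmed against the model repository.

import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# trust_remote_code is required because flamingo-mini ships custom model code.
model = AutoModelForCausalLM.from_pretrained('dhansmair/flamingo-mini', trust_remote_code=True)
model.to(device)
model.eval()
processor = AutoProcessor.from_pretrained('dhansmair/flamingo-mini', trust_remote_code=True)

def predict_caption(image, prompt="<image>"):
    # Preprocess the prompt and image into model-ready tensors (keyword names assumed).
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        # Unpack the processed tensors as keyword arguments to generate().
        output_ids = model.generate(**inputs, max_length=50)
    # Decode the generated token ids back into a caption string.
    return processor.batch_decode(output_ids, skip_special_tokens=True)[0]

# Example usage (hypothetical local file):
# print(predict_caption(Image.open('examples/cat.jpg')))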