sitammeur committed (verified)
Commit 13c9d4a · Parent(s): 5d1d027

Update src/app/response.py

Files changed (1):
  src/app/response.py  +12 -5
src/app/response.py CHANGED
@@ -9,11 +9,7 @@ from src.config import (
     model_name,
     sampling,
     stream,
-    top_p,
-    top_k,
-    temperature,
     repetition_penalty,
-    max_new_tokens,
 )
 from src.app.model import load_model_and_tokenizer
 from src.logger import logging
@@ -25,13 +21,24 @@ model, tokenizer, processor = load_model_and_tokenizer(model_name, device)
 
 
 @spaces.GPU(duration=120)
-def describe_image(image: str, question: str) -> str:
+def describe_image(
+    image: str,
+    question: str,
+    temperature: float,
+    top_p: float,
+    top_k: int,
+    max_new_tokens: int,
+) -> str:
     """
     Generates an answer to a given question based on the provided image and question.
 
     Args:
     - image (str): The path to the image file.
     - question (str): The question text.
+    - temperature (float): The temperature parameter for the model.
+    - top_p (float): The top_p parameter for the model.
+    - top_k (int): The top_k parameter for the model.
+    - max_new_tokens (int): The max tokens to be generated by the model.
 
     Returns:
         str: The generated answer to the question.
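
For context, this commit moves the sampling controls (temperature, top_p, top_k, max_new_tokens) from module-level config imports into per-call arguments of describe_image. A minimal sketch of how the updated signature might be called is shown below; the image path, question, and parameter values are illustrative assumptions, not part of this commit.

# Hypothetical caller sketch: values below are assumptions for illustration only.
from src.app.response import describe_image

answer = describe_image(
    image="examples/sample.jpg",        # assumed path to an image file
    question="What is in the picture?", # assumed question text
    temperature=0.7,                    # per-request sampling temperature
    top_p=0.9,                          # nucleus sampling threshold
    top_k=50,                           # top-k sampling cutoff
    max_new_tokens=256,                 # cap on generated tokens
)
print(answer)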