cahya committed on
Commit
d846aea
1 Parent(s): 3f4c277

disabled st.cache for the inference

Browse files
Files changed (1) hide show
  1. app/app.py +2 -1
app/app.py CHANGED
@@ -35,7 +35,8 @@ def get_generator(model_name: str):
35
  text_generator = pipeline('text-generation', model=model_name)
36
  return text_generator
37
 
38
- @st.cache(suppress_st_warning=True, hash_funcs={tokenizers.Tokenizer: id})
 
39
  def process(text_generator, text: str, max_length: int = 100, do_sample: bool = True, top_k: int = 50, top_p: float = 0.95,
40
  temperature: float = 1.0, max_time: float = 60.0, seed=42):
41
  # st.write("Cache miss: process")
 
35
  text_generator = pipeline('text-generation', model=model_name)
36
  return text_generator
37
 
38
+ # Disable the st.cache for this function due to issue on newer version of streamlit
39
+ # @st.cache(suppress_st_warning=True, hash_funcs={tokenizers.Tokenizer: id})
40
  def process(text_generator, text: str, max_length: int = 100, do_sample: bool = True, top_k: int = 50, top_p: float = 0.95,
41
  temperature: float = 1.0, max_time: float = 60.0, seed=42):
42
  # st.write("Cache miss: process")