WYBar committed on
Commit
736a804
·
1 Parent(s): e729779
Files changed (2) hide show
  1. app.py +4 -0
  2. app_test.py +4 -0
app.py CHANGED
@@ -340,14 +340,18 @@ def construction_layout():
340
  def evaluate_v1(inputs, model, quantizer, tokenizer, width, height, device, do_sample=False, temperature=1.0, top_p=1.0, top_k=50):
341
  json_example = inputs
342
  input_intension = '{"wholecaption":"' + json_example["wholecaption"] + '","layout":[{"layer":'
 
343
  inputs = tokenizer(
344
  input_intension, return_tensors="pt"
345
  ).to(model.lm.device)
 
346
 
347
  stopping_criteria = StoppingCriteriaList()
348
  stopping_criteria.append(StopAtSpecificTokenCriteria(token_id_list=[128000]))
349
 
 
350
  outputs = model.lm.generate(**inputs, use_cache=True, max_length=8000, stopping_criteria=stopping_criteria, do_sample=do_sample, temperature=temperature, top_p=top_p, top_k=top_k)
 
351
  inputs_length = inputs['input_ids'].shape[1]
352
  outputs = outputs[:, inputs_length:]
353
 
 
340
  def evaluate_v1(inputs, model, quantizer, tokenizer, width, height, device, do_sample=False, temperature=1.0, top_p=1.0, top_k=50):
341
  json_example = inputs
342
  input_intension = '{"wholecaption":"' + json_example["wholecaption"] + '","layout":[{"layer":'
343
+ print("tokenizer1")
344
  inputs = tokenizer(
345
  input_intension, return_tensors="pt"
346
  ).to(model.lm.device)
347
+ print("tokenizer2")
348
 
349
  stopping_criteria = StoppingCriteriaList()
350
  stopping_criteria.append(StopAtSpecificTokenCriteria(token_id_list=[128000]))
351
 
352
+ print("lm1")
353
  outputs = model.lm.generate(**inputs, use_cache=True, max_length=8000, stopping_criteria=stopping_criteria, do_sample=do_sample, temperature=temperature, top_p=top_p, top_k=top_k)
354
+ print("lm2")
355
  inputs_length = inputs['input_ids'].shape[1]
356
  outputs = outputs[:, inputs_length:]
357
 
app_test.py CHANGED
@@ -340,14 +340,18 @@ def construction_layout():
340
  def evaluate_v1(inputs, model, quantizer, tokenizer, width, height, device, do_sample=False, temperature=1.0, top_p=1.0, top_k=50):
341
  json_example = inputs
342
  input_intension = '{"wholecaption":"' + json_example["wholecaption"] + '","layout":[{"layer":'
 
343
  inputs = tokenizer(
344
  input_intension, return_tensors="pt"
345
  ).to(model.lm.device)
 
346
 
347
  stopping_criteria = StoppingCriteriaList()
348
  stopping_criteria.append(StopAtSpecificTokenCriteria(token_id_list=[128000]))
349
 
 
350
  outputs = model.lm.generate(**inputs, use_cache=True, max_length=8000, stopping_criteria=stopping_criteria, do_sample=do_sample, temperature=temperature, top_p=top_p, top_k=top_k)
 
351
  inputs_length = inputs['input_ids'].shape[1]
352
  outputs = outputs[:, inputs_length:]
353
 
 
340
  def evaluate_v1(inputs, model, quantizer, tokenizer, width, height, device, do_sample=False, temperature=1.0, top_p=1.0, top_k=50):
341
  json_example = inputs
342
  input_intension = '{"wholecaption":"' + json_example["wholecaption"] + '","layout":[{"layer":'
343
+ print("tokenizer1")
344
  inputs = tokenizer(
345
  input_intension, return_tensors="pt"
346
  ).to(model.lm.device)
347
+ print("tokenizer2")
348
 
349
  stopping_criteria = StoppingCriteriaList()
350
  stopping_criteria.append(StopAtSpecificTokenCriteria(token_id_list=[128000]))
351
 
352
+ print("lm1")
353
  outputs = model.lm.generate(**inputs, use_cache=True, max_length=8000, stopping_criteria=stopping_criteria, do_sample=do_sample, temperature=temperature, top_p=top_p, top_k=top_k)
354
+ print("lm2")
355
  inputs_length = inputs['input_ids'].shape[1]
356
  outputs = outputs[:, inputs_length:]
357