WYBar committed on
Commit
d3c9995
·
1 Parent(s): d835c19

delete print model.device

Browse files
Files changed (1) hide show
  1. app.py +47 -43
app.py CHANGED
@@ -361,6 +361,7 @@ def construction_all():
361
  @torch.no_grad()
362
  @spaces.GPU(duration=120)
363
  def evaluate_v1(inputs, model, quantizer, tokenizer, width, height, do_sample=False, temperature=1.0, top_p=1.0, top_k=50):
 
364
  print(f"evaluate_v1 {model.device} {model.lm.device} {pipeline.device}")
365
  model = model.to("cuda")
366
  print(f"after evaluate_v1 {model.device} {model.lm.device} {pipeline.device}")
@@ -406,6 +407,7 @@ def evaluate_v1(inputs, model, quantizer, tokenizer, width, height, do_sample=Fa
406
  return pred_json_example
407
 
408
  def inference(generate_method, intention, model, quantizer, tokenizer, width, height, do_sample=True, temperature=1.0, top_p=1.0, top_k=50):
 
409
  rawdata = {}
410
  rawdata["wholecaption"] = intention
411
  rawdata["layout"] = []
@@ -414,7 +416,7 @@ def inference(generate_method, intention, model, quantizer, tokenizer, width, he
414
  max_try_time = 5
415
  preddata = None
416
  while preddata is None and max_try_time > 0:
417
- print(f"inference {model.device} {model.lm.device} {pipeline.device}")
418
  preddata = evaluate_v1(rawdata, model, quantizer, tokenizer, width, height, do_sample=do_sample, temperature=temperature, top_p=top_p, top_k=top_k)
419
  max_try_time -= 1
420
  else:
@@ -423,6 +425,50 @@ def inference(generate_method, intention, model, quantizer, tokenizer, width, he
423
 
424
  return preddata
425
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
426
  @spaces.GPU(duration=120)
427
  def test_one_sample(validation_box, validation_prompt, true_gs, inference_steps, pipeline, generator, transp_vae):
428
  print(validation_box)
@@ -515,48 +561,6 @@ def process_svg(text_input, tuple_input, seed, true_gs, inference_steps):
515
  """
516
 
517
  return result_images, svg_file_path, svg_editor
518
-
519
- def process_preddate(intention, temperature, top_p, generate_method='v1'):
520
- intention = intention.replace('\n', '').replace('\r', '').replace('\\', '')
521
- intention = ensure_space_after_period(intention)
522
- print(f"process_preddate: {model.lm.device}")
523
- if temperature == 0.0:
524
- # print("looking for greedy decoding strategies, set `do_sample=False`.")
525
- # preddata = inference_partial(generate_method, intention, do_sample=False)
526
- preddata = inference(generate_method, intention, model=model, quantizer=quantizer, tokenizer=tokenizer, width=512, height=512, do_sample=False)
527
- else:
528
- # preddata = inference_partial(generate_method, intention, temperature=temperature, top_p=top_p)
529
- preddata = inference(generate_method, intention, model=model, quantizer=quantizer, tokenizer=tokenizer, width=512, height=512, temperature=temperature, top_p=top_p)
530
-
531
- layouts = preddata["layout"]
532
- list_box = []
533
- for i, layout in enumerate(layouts):
534
- x, y = layout["x"], layout["y"]
535
- width, height = layout["width"], layout["height"]
536
- if i == 0:
537
- list_box.append((0, 0, width, height))
538
- list_box.append((0, 0, width, height))
539
- else:
540
- left = x - width // 2
541
- top = y - height // 2
542
- right = x + width // 2
543
- bottom = y + height // 2
544
- list_box.append((left, top, right, bottom))
545
-
546
- # print(list_box)
547
- filtered_boxes = list_box[:2]
548
- for i in range(2, len(list_box)):
549
- keep = True
550
- for j in range(1, len(filtered_boxes)):
551
- iou = calculate_iou(list_box[i], filtered_boxes[j])
552
- if iou > 0.65:
553
- print(list_box[i], filtered_boxes[j])
554
- keep = False
555
- break
556
- if keep:
557
- filtered_boxes.append(list_box[i])
558
-
559
- return str(filtered_boxes), intention, str(filtered_boxes)
560
 
561
  def main():
562
  construction_all()
 
361
  @torch.no_grad()
362
  @spaces.GPU(duration=120)
363
  def evaluate_v1(inputs, model, quantizer, tokenizer, width, height, do_sample=False, temperature=1.0, top_p=1.0, top_k=50):
364
+ print(f"evaluate_v1")
365
  print(f"evaluate_v1 {model.device} {model.lm.device} {pipeline.device}")
366
  model = model.to("cuda")
367
  print(f"after evaluate_v1 {model.device} {model.lm.device} {pipeline.device}")
 
407
  return pred_json_example
408
 
409
  def inference(generate_method, intention, model, quantizer, tokenizer, width, height, do_sample=True, temperature=1.0, top_p=1.0, top_k=50):
410
+ print(f"start inference")
411
  rawdata = {}
412
  rawdata["wholecaption"] = intention
413
  rawdata["layout"] = []
 
416
  max_try_time = 5
417
  preddata = None
418
  while preddata is None and max_try_time > 0:
419
+ print(f"preddata = evaluate_v1")
420
  preddata = evaluate_v1(rawdata, model, quantizer, tokenizer, width, height, do_sample=do_sample, temperature=temperature, top_p=top_p, top_k=top_k)
421
  max_try_time -= 1
422
  else:
 
425
 
426
  return preddata
427
 
428
+ def process_preddate(intention, temperature, top_p, generate_method='v1'):
429
+ intention = intention.replace('\n', '').replace('\r', '').replace('\\', '')
430
+ intention = ensure_space_after_period(intention)
431
+ print(f"process_preddate")
432
+ if temperature == 0.0:
433
+ # print("looking for greedy decoding strategies, set `do_sample=False`.")
434
+ # preddata = inference_partial(generate_method, intention, do_sample=False)
435
+ print(f"preddata = inference temperatrue = 0.0")
436
+ preddata = inference(generate_method, intention, model=model, quantizer=quantizer, tokenizer=tokenizer, width=512, height=512, do_sample=False)
437
+ else:
438
+ # preddata = inference_partial(generate_method, intention, temperature=temperature, top_p=top_p)
439
+ print(f"preddata = inference temperatrue != 0.0")
440
+ preddata = inference(generate_method, intention, model=model, quantizer=quantizer, tokenizer=tokenizer, width=512, height=512, temperature=temperature, top_p=top_p)
441
+
442
+ layouts = preddata["layout"]
443
+ list_box = []
444
+ for i, layout in enumerate(layouts):
445
+ x, y = layout["x"], layout["y"]
446
+ width, height = layout["width"], layout["height"]
447
+ if i == 0:
448
+ list_box.append((0, 0, width, height))
449
+ list_box.append((0, 0, width, height))
450
+ else:
451
+ left = x - width // 2
452
+ top = y - height // 2
453
+ right = x + width // 2
454
+ bottom = y + height // 2
455
+ list_box.append((left, top, right, bottom))
456
+
457
+ # print(list_box)
458
+ filtered_boxes = list_box[:2]
459
+ for i in range(2, len(list_box)):
460
+ keep = True
461
+ for j in range(1, len(filtered_boxes)):
462
+ iou = calculate_iou(list_box[i], filtered_boxes[j])
463
+ if iou > 0.65:
464
+ print(list_box[i], filtered_boxes[j])
465
+ keep = False
466
+ break
467
+ if keep:
468
+ filtered_boxes.append(list_box[i])
469
+
470
+ return str(filtered_boxes), intention, str(filtered_boxes)
471
+
472
  @spaces.GPU(duration=120)
473
  def test_one_sample(validation_box, validation_prompt, true_gs, inference_steps, pipeline, generator, transp_vae):
474
  print(validation_box)
 
561
  """
562
 
563
  return result_images, svg_file_path, svg_editor
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
564
 
565
  def main():
566
  construction_all()