WYBar committed
Commit e8292cf · Parent: 736a804
__pycache__/custom_model_mmdit.cpython-310.pyc ADDED
Binary file (10.8 kB)

__pycache__/custom_model_transp_vae.cpython-310.pyc ADDED
Binary file (10.4 kB)

__pycache__/custom_pipeline.cpython-310.pyc ADDED
Binary file (18 kB)

__pycache__/modeling_crello.cpython-310.pyc ADDED
Binary file (6.4 kB)

__pycache__/quantizer.cpython-310.pyc ADDED
Binary file (14.6 kB)
 
app.py CHANGED
@@ -333,10 +333,11 @@ def construction_layout():
     # quantizer = quantizer.to("cuda")
     # tokenizer = tokenizer.to("cuda")
     model.lm = model.lm.to("cuda")
+    print(model.lm.device)
     return model, quantizer, tokenizer, params_dict["width"], params_dict["height"], device
 
 @torch.no_grad()
-@spaces.GPU(enable_queue=True, duration=60)
+@spaces.GPU(duration=60)
 def evaluate_v1(inputs, model, quantizer, tokenizer, width, height, device, do_sample=False, temperature=1.0, top_p=1.0, top_k=50):
     json_example = inputs
     input_intension = '{"wholecaption":"' + json_example["wholecaption"] + '","layout":[{"layer":'
@@ -344,14 +345,17 @@ def evaluate_v1(inputs, model, quantizer, tokenizer, width, height, device, do_s
     inputs = tokenizer(
         input_intension, return_tensors="pt"
     ).to(model.lm.device)
+    print(inputs.device)
     print("tokenizer2")
 
     stopping_criteria = StoppingCriteriaList()
     stopping_criteria.append(StopAtSpecificTokenCriteria(token_id_list=[128000]))
 
     print("lm1")
+    print(model.lm.device)
     outputs = model.lm.generate(**inputs, use_cache=True, max_length=8000, stopping_criteria=stopping_criteria, do_sample=do_sample, temperature=temperature, top_p=top_p, top_k=top_k)
     print("lm2")
+
     inputs_length = inputs['input_ids'].shape[1]
     outputs = outputs[:, inputs_length:]
 
@@ -427,7 +431,7 @@ def construction():
 
     return pipeline, transp_vae
 
-@spaces.GPU(enable_queue=True, duration=60)
+@spaces.GPU(duration=60)
 def test_one_sample(validation_box, validation_prompt, true_gs, inference_steps, pipeline, generator, transp_vae):
     print(validation_box)
     output, rgba_output, _, _ = pipeline(
@@ -474,7 +478,17 @@ def svg_test_one_sample(validation_prompt, validation_box_str, seed, true_gs, in
     svg_file_path = './image.svg'
     os.makedirs(os.path.dirname(svg_file_path), exist_ok=True)
     with open(svg_file_path, 'w', encoding='utf-8') as f:
-        f.write(svg_img)
+        f.write(svg_img)
+
+    if not isinstance(result_images, list):
+        raise TypeError("result_images must be a list")
+    else:
+        print(len(result_images))
+
+    if not os.path.exists(svg_file_path):
+        raise FileNotFoundError(f"File {svg_file_path} was not created")
+    if os.path.getsize(svg_file_path) == 0:
+        raise ValueError(f"File {svg_file_path} is empty")
 
     return result_images, svg_file_path
 
 
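Two notes on the app.py changes. The decorator now reads @spaces.GPU(duration=60); the enable_queue argument was dropped, presumably because current releases of the spaces package no longer accept it. Separately, generation stops via a custom StopAtSpecificTokenCriteria, which is defined elsewhere in app.py and not shown in this diff. A minimal sketch of what such a criterion typically looks like, assuming the standard transformers StoppingCriteria interface (only the class name and token id 128000 come from the diff):

import torch
from transformers import StoppingCriteria

class StopAtSpecificTokenCriteria(StoppingCriteria):
    """Hypothetical sketch; the real definition lives elsewhere in app.py."""
    def __init__(self, token_id_list):
        super().__init__()
        self.token_id_list = token_id_list  # e.g. [128000]

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # Stop as soon as the most recently generated token is a target id.
        return input_ids[0, -1].item() in self.token_id_list

With a criterion like this in the StoppingCriteriaList, model.lm.generate(...) halts as soon as token 128000 is emitted rather than always running to max_length=8000.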
 
image.svg ADDED
modeling_crello.py CHANGED
@@ -196,6 +196,7 @@ class CrelloModel(PreTrainedModel):
         self,
         labels: torch.LongTensor,
     ):
+        print("inside Crello")
         batch_size = labels.shape[0]
         full_labels = labels.detach().clone()
 
@@ -219,10 +220,12 @@
             pad_idx.append(k + 1)
         assert len(pad_idx) == batch_size, (len(pad_idx), batch_size)
 
+        print("inside Crello, lm1")
         output = self.lm( inputs_embeds=input_embs,
                           # input_ids=labels,
                           labels=full_labels,
                           output_hidden_states=True)
+        print("inside Crello, lm2")
 
         return output, full_labels, input_embs_norm
 
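For context on the call that the new prints bracket: self.lm(inputs_embeds=..., labels=..., output_hidden_states=True) is the standard Hugging Face causal-LM pattern of passing pre-computed embeddings and letting the model shift the labels and compute the loss internally. A self-contained sketch of that pattern, with gpt2 standing in for the LM that CrelloModel actually wraps:

from transformers import AutoModelForCausalLM, AutoTokenizer

# gpt2 is only a stand-in; CrelloModel wraps a different LM.
tok = AutoTokenizer.from_pretrained("gpt2")
lm = AutoModelForCausalLM.from_pretrained("gpt2")

labels = tok("a tiny example", return_tensors="pt").input_ids
# Look up embeddings explicitly, as CrelloModel does before calling self.lm.
input_embs = lm.get_input_embeddings()(labels)

output = lm(inputs_embeds=input_embs,
            labels=labels,                # shifted internally for the LM loss
            output_hidden_states=True)
print(output.loss, output.hidden_states[-1].shape)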