HaisuGuan committed on
Commit
33273a9
·
1 Parent(s): c2b4dee
Files changed (1) hide show
  1. app.py +4 -6
app.py CHANGED
@@ -81,20 +81,18 @@ def sepia(image_web, seed):
81
  np.random.seed(seed)
82
  if torch.cuda.is_available():
83
  torch.cuda.manual_seed_all(seed)
84
- image = web_input(image_web)
85
  output_image = model.web_restore(image, r=config.data.grid_r)
86
  grid = make_grid(output_image)
87
  ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
88
- # torch.cuda.empty_cache()
89
- return ndarr
90
 
91
 
92
  demo = gr.Interface(sepia,
93
  inputs=[gr.Image(label="输入甲骨文图片", height=600, width=600), gr.Number(label="随机种子")],
94
- outputs=gr.Image(label="输出汉字图片", height=600, width=600),
95
  title=title_markdown)
96
  demo.queue().launch(
97
- server_name="127.0.0.1",
98
- server_port=7681,
99
  share=True
100
  )
 
81
  np.random.seed(seed)
82
  if torch.cuda.is_available():
83
  torch.cuda.manual_seed_all(seed)
84
+ image, image_tmp = web_input(image_web)
85
  output_image = model.web_restore(image, r=config.data.grid_r)
86
  grid = make_grid(output_image)
87
  ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
88
+ torch.cuda.empty_cache()
89
+ return ndarr, image_tmp
90
 
91
 
92
  demo = gr.Interface(sepia,
93
  inputs=[gr.Image(label="输入甲骨文图片", height=600, width=600), gr.Number(label="随机种子")],
94
+ outputs=[gr.Image(label="输出汉字图片", height=600, width=600), gr.Image(label="矫正后甲骨文图片", height=600, width=600)],
95
  title=title_markdown)
96
  demo.queue().launch(
 
 
97
  share=True
98
  )