rajistics committed on
Commit
ae92be6
β€’
1 Parent(s): f87e896

adding underline

Browse files
Files changed (1) hide show
  1. app.py +2 -4
app.py CHANGED
@@ -81,19 +81,17 @@ def process_image(image):
81
 
82
  title = "Extracting Receipts: LayoutLMv3"
83
  description = """<p> Demo for Microsoft's LayoutLMv3, a Transformer for state-of-the-art document image understanding tasks. </p> <p> This particular model is fine-tuned from
84
- <a href="https://github.com/clovaai/cord">CORD</a> on the Consolidated Receipt Dataset, a dataset of receipts. If you search the πŸ€— Hugging Face hub you will see other related models fine-tuned for other documents. This model is trained using fine-tuning to look for entities around menu items, subtotal, and total prices. To perform your own fine-tuning, take a look at the
85
- <a href="https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3">notebook by Niels</a>. </p> <p> To try it out, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. To see the output bigger, right-click on it, select 'Open image in new tab', and use your browser's zoom feature. </p>"""
86
 
87
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2204.08387' target='_blank'>LayoutLMv3: Multi-modal Pre-training for Visually-Rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>Github Repo</a></p>"
88
  examples =[['test0.jpeg'],['test1.jpeg'],['test2.jpeg']]
89
 
90
- css = "{ text-decoration: none; border-bottom:1px solid red; }"
91
  iface = gr.Interface(fn=process_image,
92
  inputs=gr.inputs.Image(type="pil"),
93
  outputs=gr.outputs.Image(type="pil", label="annotated image"),
94
  title=title,
95
  description=description,
96
  article=article,
97
- css = css,
98
  examples=examples)
99
  iface.launch(debug=True)
 
81
 
82
  title = "Extracting Receipts: LayoutLMv3"
83
  description = """<p> Demo for Microsoft's LayoutLMv3, a Transformer for state-of-the-art document image understanding tasks. </p> <p> This particular model is fine-tuned from
84
+ <a href="https://github.com/clovaai/cord" style="text-decoration: underline">CORD</a> on the Consolidated Receipt Dataset, a dataset of receipts. If you search the πŸ€— Hugging Face hub you will see other related models fine-tuned for other documents. This model is trained using fine-tuning to look for entities around menu items, subtotal, and total prices. To perform your own fine-tuning, take a look at the
85
+ <a href="https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3" style="text-decoration: underline">notebook by Niels</a>. </p> <p> To try it out, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. To see the output bigger, right-click on it, select 'Open image in new tab', and use your browser's zoom feature. </p>"""
86
 
87
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2204.08387' target='_blank'>LayoutLMv3: Multi-modal Pre-training for Visually-Rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>Github Repo</a></p>"
88
  examples =[['test0.jpeg'],['test1.jpeg'],['test2.jpeg']]
89
 
 
90
  iface = gr.Interface(fn=process_image,
91
  inputs=gr.inputs.Image(type="pil"),
92
  outputs=gr.outputs.Image(type="pil", label="annotated image"),
93
  title=title,
94
  description=description,
95
  article=article,
 
96
  examples=examples)
97
  iface.launch(debug=True)