Luigi committed on
Commit
bc1bd75
·
verified ·
1 Parent(s): 74c66e7

add parser_model_ner_gemma_v0 based on gemma 3 270m it

Browse files
Files changed (1) hide show
  1. app.py +4 -0
app.py CHANGED
@@ -28,6 +28,10 @@ cancel_event = threading.Event()
28
  MODELS = {
29
  # … your existing entries …
30
  "Qwen2.5-Taiwan-1.5B-Instruct": {"repo_id": "benchang1110/Qwen2.5-Taiwan-1.5B-Instruct", "description": "Qwen2.5-Taiwan-1.5B-Instruct"},
 
 
 
 
31
  "Gemma-3-Taiwan-270M-it":{
32
  "repo_id":"lianghsun/Gemma-3-Taiwan-270M-it",
33
  "description": "google/gemma-3-270m-it fintuned on Taiwan Chinese dataset"
 
28
  MODELS = {
29
  # … your existing entries …
30
  "Qwen2.5-Taiwan-1.5B-Instruct": {"repo_id": "benchang1110/Qwen2.5-Taiwan-1.5B-Instruct", "description": "Qwen2.5-Taiwan-1.5B-Instruct"},
31
+ "parser_model_ner_gemma_v0.1": {
32
+ "repo_id": "myfi/parser_model_ner_gemma_v0.1",
33
+ "description": "A lightweight named‑entity‑like (NER) parser fine‑tuned from Google’s **Gemma‑3‑270M** model. The base Gemma‑3‑270M is a 270 M‑parameter, hyper‑efficient LLM designed for on‑device inference, supporting >140 languages, a 128 k‑token context window, and instruction‑following capabilities [2][7]. This variant is further trained on standard NER corpora (e.g., CoNLL‑2003, OntoNotes) to extract PERSON, ORG, LOC, and MISC entities with high precision while keeping the memory footprint low (≈240 MB VRAM in BF16 quantized form) [1]. It is released under the Apache‑2.0 license and can be used for fast, cost‑effective entity extraction in low‑resource environments."
34
+ },
35
  "Gemma-3-Taiwan-270M-it":{
36
  "repo_id":"lianghsun/Gemma-3-Taiwan-270M-it",
37
  "description": "google/gemma-3-270m-it fintuned on Taiwan Chinese dataset"