---
language: en
tags:
- autotrain
datasets:
- lewtun/autotrain-data-acronym-identification
- acronym_identification
widget:
- text: I love AutoTrain 🤗
co2_eq_emissions: 10.435358044493652
model-index:
- name: autotrain-demo
  results:
  - task:
      type: token-classification
      name: Token Classification
    dataset:
      name: acronym_identification
      type: acronym_identification
      args: default
    metrics:
    - type: accuracy
      value: 0.9708090976211485
      name: Accuracy
  - task:
      type: token-classification
      name: Token Classification
    dataset:
      name: acronym_identification
      type: acronym_identification
      config: default
      split: train
    metrics:
    - type: accuracy
      value: 0.9790777669399117
      name: Accuracy
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiZGE4MjA4Yzc2MTNlY2U4NTY4MDkzODRkYzY4MjEzOGYzZGQ2ZGY3ZmI0YmU2YmEyMjY4Njc1YzhkYTFhZWU0ZiIsInZlcnNpb24iOjF9.rFLe74m-uhC-pM_WhnALYWQfDnpChZ7ujc0p75U3095TgeislAvvHYOpcYcepdmKq5Sqw61AiqqhGTsNbxCzCA
    - type: precision
      value: 0.9197835301644851
      name: Precision
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMTM0MGM2MGEzNzIxMjljODM5ZTkzYTMxZmQxZWE4ZjcxMWYwOGE5NGM1ZWIwNTkzNDAzNWM2ZWQxNDZkMzU5ZCIsInZlcnNpb24iOjF9.ghxhIgnPdqAdtyEn5TL_F_f7vp2TVmYUVxC_BzbG37DZpFS0PetO12fsNEydh4l6qf2V0gKoCY-6gsxx8uYoDA
    - type: recall
      value: 0.946479027789208
      name: Recall
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNTQxNmU4M2FlMGRjZjU4YzcxMjUyMGVmYzhiMDRkOTM4MTY2ZTY1ZGE4ZjQyN2I5MzQxNjEyZjIwNmU0ZDQ3MCIsInZlcnNpb24iOjF9.hjNZ1PQ2fXMZ44rcd0Xb_NGXMyXYB-EaYyZg8GYWkVt0B89PU6lCnHORPjK8U1QFmcSeP8O_Xwvvr39GgPpYBg
    - type: f1
      value: 0.9329403493591477
      name: F1
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMzk4YTQ2Mzk5NTNlM2M0NDhmYzQ4NDlhOGUzNjc2ZWVmNzhhYWM4ZmE1ZjA3NzAyMDU2MDc3NTVlNzAyZTk3ZCIsInZlcnNpb24iOjF9.QjF8xwoC_ziNUwhzz-cd_AiGaIhWcR7eQtd2JWF4v-ENA4K6hjj-c2R_FzP2hQKQ_OY1Wq2C4E3ZF8ze13XlCw
    - type: loss
      value: 0.06360606849193573
      name: loss
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYjlkZmNlMGI4MGQ1MmU1MDZjMmNhZDA4NmY3M2ZmNzk1ZTA2ODFjYTAwYjJiZjQwMmY2ZDAzMDYzYjQ3MDhjMiIsInZlcnNpb24iOjF9.J1yJobGoluZgQMtdNcexCPVGPido4rmjpdcU53HvS2Gf-Z8SGzY4NfVxg-0jREqJQbCQ3EP_37CsP5NEClpMDg
  - task:
      type: token-classification
      name: Token Classification
    dataset:
      name: acronym_identification
      type: acronym_identification
      config: default
      split: validation
    metrics:
    - type: accuracy
      value: 0.9758354452761242
      name: Accuracy
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNjY5OGMzNzg1Mjg4YmFlOWEwOGVkZmM2OTQwYTUyMzRmZWNiZjFmNmM0OGU0MmM2Y2YwZGM1M2EyNDg5NTNhYSIsInZlcnNpb24iOjF9.ttERWzOkI_iUeEzv63lMNYvzdlU0q3gbqYL41_ZjZ-VKrIVMgEfsZCN2ldPJWPZaPiW_UnrUpbBLFTPieIQ-Dw
    - type: precision
      value: 0.9339674814732883
      name: Precision
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMTA2YjFhMDc0MDU2NDQ0MWNkYjdjMjBjNTllMDRlNjdmNDk4MGVjOTI4MzdjYjc2M2NkNzUxMTczMzk4NGU2MCIsInZlcnNpb24iOjF9.dPhHuCcAHPnS_cvOPiERU6myfNTlVVrd64jU_EYAK7rXMEWQ_IrOLgvViwrDjp9JVb-UEgO-7SmtPUlBqzfyCA
    - type: recall
      value: 0.9159344831326608
      name: Recall
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNTg1MjVlYmNiN2Q3MjVmZTY4NDE4MjI2NTcyYjUxY2RkODI1MzRhOWE0MjBjNDE4ZDhlNWMyNGFjOGE2ODhiNCIsInZlcnNpb24iOjF9.mFsDy4rHaIByjBHsk1HSjj56NA8oPOXwOlcjDsqkKd0hJh1viYEIgwJn22PL62nBJvOYdnUB9HZDYvlM_uOuDw
    - type: f1
      value: 0.9248630887185104
      name: F1
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNGM4MDY2MTQxODVlYjBlMGM0YThhZmQ1YjNiNTM3YjZmODdlNjM5OGM4NDE2YTdhYWNiMzM0ZDRiMjIyNWQ2MCIsInZlcnNpb24iOjF9.ehbT56xqKXykHiyau95BLHLjfJcSi8OtsG6TDeDbyjggNN-WnOu7zz2kquR_mX9vcm3aJkbsXVA1eB6tH3cZDg
    - type: loss
      value: 0.07593930512666702
      name: loss
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNmM3NjViNzA3MWJhNzQzZThlMjQ1ZDdjMzAyMjZkZWVhYjUxMDhiZDYwMDJjYTBlNGQ4MDk0YzczOWYyNjNiOCIsInZlcnNpb24iOjF9.OzgQm9WJ7WXJVjkNyfP8UEKeN0j-XTww2AGPY8QOlm3c2bAVa23_cf4J-44yvCiHWuqK5kKVE04NjRjfq39zCg
---

# Model Trained Using AutoTrain

- Problem type: Entity Extraction
- Model ID: 7324788
- CO2 Emissions (in grams): 10.435358044493652

## Validation Metrics

- Loss: 0.08991389721632004
- Accuracy: 0.9708090976211485
- Precision: 0.8998421675654347
- Recall: 0.9309429854401959
- F1: 0.9151284109149278

## Usage

You can use cURL to access this model:

```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/lewtun/autotrain-acronym-identification-7324788
```

Or Python API:

```
from transformers import AutoModelForTokenClassification, AutoTokenizer

model = AutoModelForTokenClassification.from_pretrained("lewtun/autotrain-acronym-identification-7324788", use_auth_token=True)

tokenizer = AutoTokenizer.from_pretrained("lewtun/autotrain-acronym-identification-7324788", use_auth_token=True)

inputs = tokenizer("I love AutoTrain", return_tensors="pt")

outputs = model(**inputs)
```