Commit bf27af6 ("two")
1 parent: 8ea24ae

Files changed:
- README.md (+37 -8)
- demo_api.py (+13 -3)
README.md  CHANGED
@@ -1,13 +1,42 @@
 ---
-language:
-[removed front-matter lines 3-9 are not legible in the commit view]
+language: en
+license: cc-by-4.0
+datasets:
+- squad_v2
+model-index:
+- name: deepset/roberta-base-squad2
+  results:
+  - task:
+      type: question-answering
+      name: Question Answering
+    dataset:
+      name: squad_v2
+      type: squad_v2
+      config: squad_v2
+      split: validation
+    metrics:
+    - type: exact_match
+      value: 79.9309
+      name: Exact Match
+      verified: true
+      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMDhhNjg5YzNiZGQ1YTIyYTAwZGUwOWEzZTRiYzdjM2QzYjA3ZTUxNDM1NjE1MTUyMjE1MGY1YzEzMjRjYzVjYiIsInZlcnNpb24iOjF9.EH5JJo8EEFwU7osPz3s7qanw_tigeCFhCXjSfyN0Y1nWVnSfulSxIk_DbAEI5iE80V4EKLyp5-mYFodWvL2KDA
+    - type: f1
+      value: 82.9501
+      name: F1
+      verified: true
+      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMjk5ZDYwOGQyNjNkMWI0OTE4YzRmOTlkY2JjNjQ0YTZkNTMzMzNkYTA0MDFmNmI3NjA3NjNlMjhiMDQ2ZjJjNSIsInZlcnNpb24iOjF9.DDm0LNTkdLbGsue58bg1aH_s67KfbcmkvL-6ZiI2s8IoxhHJMSf29H_uV2YLyevwx900t-MwTVOW3qfFnMMEAQ
+    - type: total
+      value: 11869
+      name: total
+      verified: true
+      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMGFkMmI2ODM0NmY5NGNkNmUxYWViOWYxZDNkY2EzYWFmOWI4N2VhYzY5MGEzMTVhOTU4Zjc4YWViOGNjOWJjMCIsInZlcnNpb24iOjF9.fexrU1icJK5_MiifBtZWkeUvpmFISqBLDXSQJ8E6UnrRof-7cU0s4tX_dIsauHWtUpIHMPZCf5dlMWQKXZuAAA
+
 ---
+
+
+
+
+
 # ChatGLM-6B-INT4
 <p align="center">
     👋 Join our <a href="https://join.slack.com/t/chatglm/shared_invite/zt-1th2q5u69-7tURzFuOPanmuHy9hsZnKA" target="_blank">Slack</a> and <a href="https://github.com/THUDM/ChatGLM-6B/blob/main/resources/WECHAT.md" target="_blank">WeChat</a>
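The model-index block added to the README declares verified extractive question-answering results on the squad_v2 validation split (exact match 79.9309, F1 82.9501) under the name deepset/roberta-base-squad2. As a minimal sketch of the task those metrics describe, assuming the evaluated checkpoint really is deepset/roberta-base-squad2 as named in the front matter (and not the ChatGLM-6B-INT4 weights stored in this repository), the transformers pipeline API runs the same extractive QA end to end:

# Minimal sketch of the extractive QA task described by the new model-index block.
# Assumption: the evaluated checkpoint is deepset/roberta-base-squad2, as named in
# the front matter; the question and context strings are illustrative only.
from transformers import pipeline

qa = pipeline("question-answering", model="deepset/roberta-base-squad2")
result = qa(
    question="Which license does the model card declare?",
    context="The updated README front matter sets license cc-by-4.0 and dataset squad_v2.",
)
print(result["answer"], result["score"])

The pipeline returns the highest-scoring answer span plus a confidence score, which is the setting the exact_match and F1 metrics are computed over.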
demo_api.py  CHANGED
@@ -1,6 +1,16 @@
-from transformers import AutoModel,AutoTokenizer
-tokenizer = AutoTokenizer.from_pretrained('NewBreaker/chatglm-6b-int4',trust_remote_code=True)
-model = AutoModel.from_pretrained('NewBreaker/chatglm-6b-int4',trust_remote_code=True)
+# from transformers import AutoModel,AutoTokenizer
+# tokenizer = AutoTokenizer.from_pretrained('NewBreaker/chatglm-6b-int4',trust_remote_code=True)
+# model = AutoModel.from_pretrained('NewBreaker/chatglm-6b-int4',trust_remote_code=True)
 
 
 
+from transformers import AutoModelForQuestionAnswering
+
+model = AutoModelForQuestionAnswering.from_pretrained("NewBreaker/chatglm-6b-int4")
+
+
+# import requests
+# api_url = "https://api-inference.huggingface.co/usage/pinned_models"
+# headers = {"Authorization": f"Bearer {API_TOKEN}"}
+# response = requests.get(api_url, headers=headers)
+# # {"pinned_models": [...], "allowed_pinned_models": 5}