Update README.md
README.md CHANGED
@@ -2,12 +2,9 @@
 language: ko
 tags:
 - roberta
-- feature-extraction
 - sentence-transformers
 datasets:
 - klue
-inference:
-- false
 ---
 
 # KLUE RoBERTa base model for Sentence Embeddings
@@ -53,11 +50,11 @@ cos_scores = util.pytorch_cos_sim(query_embedding, document_embeddings)[0]
 top_results = torch.topk(cos_scores, k=top_k)
 
 print(f"입력 문장: {query}")
-print(f"
-<입력 문장과 유사한 {top_k} 개의
+print(f"\
+<입력 문장과 유사한 {top_k} 개의 문장>\
 ")
 
 for i, (score, idx) in enumerate(zip(top_results[0], top_results[1])):
-    print(f"{i+1}: {docs[idx]} {'(유사도: {:.4f})'.format(score)}
+    print(f"{i+1}: {docs[idx]} {'(유사도: {:.4f})'.format(score)}\
 ")
 ```
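
For context, the second hunk patches the tail of the README's semantic-search example so that the multi-line f-strings are valid Python. Below is a minimal, self-contained sketch of how the corrected snippet runs end to end; the model ID, the example documents, and the encoding calls are assumptions added for illustration, since in the README they are defined above the lines shown in this diff.

```python
# Hypothetical end-to-end version of the README's semantic-search example.
# The model ID and the example documents are placeholders, not taken from the diff.
import torch
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("your-klue-roberta-base-sentence-model")  # placeholder ID

docs = [
    "한국어 문장 임베딩 예시 문서입니다.",   # placeholder Korean documents
    "의미 검색 데모에 사용되는 문장입니다.",
    "전혀 관련 없는 문장도 하나 포함합니다.",
]
query = "문장 임베딩 예시"  # "sentence embedding example"
top_k = 2

# Encode the query and the candidate documents into dense vectors.
query_embedding = model.encode(query, convert_to_tensor=True)
document_embeddings = model.encode(docs, convert_to_tensor=True)

# Cosine similarity between the query and every document, then keep the top-k.
cos_scores = util.pytorch_cos_sim(query_embedding, document_embeddings)[0]
top_results = torch.topk(cos_scores, k=top_k)

print(f"입력 문장: {query}")                          # "input sentence"
print(f"\n<입력 문장과 유사한 {top_k} 개의 문장>\n")   # "<top_k sentences similar to the input>"

for i, (score, idx) in enumerate(zip(top_results[0], top_results[1])):
    # idx is a 0-dim tensor; it can index the Python list directly.
    print(f"{i+1}: {docs[idx]} (유사도: {score.item():.4f})\n")  # "similarity"
```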