Update app.py
app.py CHANGED
@@ -25,6 +25,8 @@ from nltk import sent_tokenize
 
 warnings.filterwarnings("ignore")
 
+auth_token = os.environ.get("auth_token")
+
 def extract_text_from_url(url: str):
 
     '''Extract text from url'''
@@ -128,7 +130,7 @@ def bi_encode(bi_enc,passages):
 
     global bi_encoder
     #We use the Bi-Encoder to encode all passages, so that we can use it with sematic search
-    bi_encoder = SentenceTransformer(bi_enc)
+    bi_encoder = SentenceTransformer(bi_enc,use_auth_token=auth_token)
 
     #quantize the model
     #bi_encoder = quantize_dynamic(model, {Linear, Embedding})
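For reference, a minimal sketch of how the two changed spots fit together. Only the module-level auth_token read and the use_auth_token argument come from this commit; the imports, the warnings call, and the encode/return lines inside bi_encode are assumed context, not part of the diff.

import os
import warnings

from sentence_transformers import SentenceTransformer

warnings.filterwarnings("ignore")

# Hub access token, e.g. stored as a Space secret named "auth_token".
# os.environ.get returns None when the variable is unset, so public models still load.
auth_token = os.environ.get("auth_token")


def bi_encode(bi_enc, passages):
    global bi_encoder
    # Load the bi-encoder; passing use_auth_token lets SentenceTransformer
    # download private or gated checkpoints from the Hugging Face Hub.
    bi_encoder = SentenceTransformer(bi_enc, use_auth_token=auth_token)
    # Encode all passages for semantic search (assumed continuation of the function).
    corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True)
    return bi_encoder, corpus_embeddings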