Update README.md
README.md

````diff
@@ -56,6 +56,7 @@ Start by installing the [library](https://huggingface.co/docs/transformers): `pi
 
 ```python
 import torch
+from torch.nn.functional import relu, normalize
 from transformers import AutoTokenizer, AutoModel
 
 queries = ["Ceci est un exemple de requête.", "Voici un second exemple."]
@@ -71,11 +72,11 @@ with torch.no_grad():
     q_output = model(**q_input)
     p_output = model(**p_input)
 
-    q_activations = torch.amax(torch.log1p(
-    p_activations = torch.amax(torch.log1p(
+    q_activations = torch.amax(torch.log1p(relu(q_output.logits * q_input['attention_mask'].unsqueeze(-1))), dim=1)
+    p_activations = torch.amax(torch.log1p(relu(p_output.logits * p_input['attention_mask'].unsqueeze(-1))), dim=1)
 
-    q_activations =
-    p_activations =
+    q_activations = normalize(q_activations, p=2, dim=1)
+    p_activations = normalize(p_activations, p=2, dim=1)
 
 similarity = q_embeddings @ p_embeddings.T
 print(similarity)
````
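For reference, the updated snippet pieced together from the hunks above computes SPLADE-style sparse activations: the output logits are zeroed at padding positions via the attention mask, passed through `relu` and `log1p`, max-pooled over the sequence dimension, and L2-normalized so that the final matrix product is a cosine similarity. Two details are worth flagging. First, the unchanged context lines compute `q_embeddings @ p_embeddings.T`, but no `q_embeddings`/`p_embeddings` are defined anywhere in the hunks; `q_activations`/`p_activations` appear to be the intended names. Second, the snippet imports `AutoModel` yet reads `output.logits`, which a bare `AutoModel` does not return; a masked-LM head does. The sketch below is a minimal runnable reconstruction under those assumptions; the checkpoint name, the `passages` list, and the tokenizer arguments do not appear in the diff and are placeholders.

```python
import torch
from torch.nn.functional import relu, normalize
from transformers import AutoTokenizer, AutoModelForMaskedLM

# Hypothetical checkpoint: the model this README documents is not visible in
# the diff, so substitute the real name here.
model_id = "org/sparse-model"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForMaskedLM.from_pretrained(model_id)  # assumed; the diff imports AutoModel

queries = ["Ceci est un exemple de requête.", "Voici un second exemple."]
passages = ["Ceci est un passage d'exemple.", "Et voici un autre passage."]  # assumed

q_input = tokenizer(queries, padding=True, truncation=True, return_tensors="pt")
p_input = tokenizer(passages, padding=True, truncation=True, return_tensors="pt")

with torch.no_grad():
    q_output = model(**q_input)
    p_output = model(**p_input)

    # Mask out padding tokens, squash activations with log1p(relu(.)),
    # and max-pool over the sequence dimension: one vector per input text.
    q_activations = torch.amax(torch.log1p(relu(q_output.logits * q_input['attention_mask'].unsqueeze(-1))), dim=1)
    p_activations = torch.amax(torch.log1p(relu(p_output.logits * p_input['attention_mask'].unsqueeze(-1))), dim=1)

    # L2-normalize so the dot products below are cosine similarities.
    q_activations = normalize(q_activations, p=2, dim=1)
    p_activations = normalize(p_activations, p=2, dim=1)

# The diff's context lines reference q_embeddings/p_embeddings here; this
# sketch uses the activation tensors defined above instead.
similarity = q_activations @ p_activations.T
print(similarity)
```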