Update Space (evaluate main: d5ecbe47)
- README.md +9 -0
- requirements.txt +1 -1
- rouge.py +17 -2
README.md CHANGED
@@ -42,6 +42,15 @@ At minimum, this metric takes as input a list of predictions and a list of references:
 {'rouge1': 1.0, 'rouge2': 1.0, 'rougeL': 1.0, 'rougeLsum': 1.0}
 ```
 
+One can also pass a custom tokenizer which is especially useful for non-latin languages.
+```python
+>>> results = rouge.compute(predictions=predictions,
+...                         references=references,
+...                         tokenizer=lambda x: x.split())
+>>> print(results)
+{'rouge1': 1.0, 'rouge2': 1.0, 'rougeL': 1.0, 'rougeLsum': 1.0}
+```
+
 It can also deal with lists of references for each predictions:
 ```python
 >>> rouge = evaluate.load('rouge')
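For reference, the newly documented argument could be exercised along these lines (a minimal sketch, not part of the diff; it assumes `evaluate` is installed together with the pinned `rouge_score>=0.1.2`, and the example sentences are made up):

```python
import evaluate

rouge = evaluate.load("rouge")

# A plain whitespace tokenizer; useful when rouge_score's default
# English tokenizer is a poor fit, e.g. for non-Latin scripts.
results = rouge.compute(
    predictions=["la vie est belle"],
    references=["la vie est belle"],
    tokenizer=lambda text: text.split(),
)
print(results)  # expected: {'rouge1': 1.0, 'rouge2': 1.0, 'rougeL': 1.0, 'rougeLsum': 1.0}
```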
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-git+https://github.com/huggingface/evaluate@
+git+https://github.com/huggingface/evaluate@d5ecbe472557e6ec3cf6173e6fe9b4fe67c4919e
 absl-py
 nltk
 rouge_score>=0.1.2
rouge.py CHANGED
@@ -80,6 +80,16 @@ Examples:
 """
 
 
+class Tokenizer:
+    """Helper class to wrap a callable into a class with a `tokenize` method as used by rouge-score."""
+
+    def __init__(self, tokenizer_func):
+        self.tokenizer_func = tokenizer_func
+
+    def tokenize(self, text):
+        return self.tokenizer_func(text)
+
+
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class Rouge(evaluate.Metric):
     def _info(self):
@@ -108,13 +118,18 @@ class Rouge(evaluate.Metric):
             ],
         )
 
-    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
+    def _compute(
+        self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False, tokenizer=None
+    ):
         if rouge_types is None:
             rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
 
         multi_ref = isinstance(references[0], list)
 
-        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
+        if tokenizer is not None:
+            tokenizer = Tokenizer(tokenizer)
+
+        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer, tokenizer=tokenizer)
         if use_aggregator:
             aggregator = scoring.BootstrapAggregator()
         else:
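As a rough illustration of why the wrapper class exists: `rouge_score`'s `RougeScorer` calls `tokenizer.tokenize(text)` on the object it is given rather than invoking a bare callable, so a plain function has to be wrapped first. Below is a minimal sketch against `rouge_score>=0.1.2` alone; the `CallableTokenizer` name and the example strings are illustrative, not part of the diff:

```python
from rouge_score import rouge_scorer


class CallableTokenizer:
    """Wraps a plain callable so it exposes the `tokenize` method RougeScorer expects."""

    def __init__(self, tokenizer_func):
        self.tokenizer_func = tokenizer_func

    def tokenize(self, text):
        return self.tokenizer_func(text)


# Pass the wrapped whitespace tokenizer instead of relying on the default English one.
scorer = rouge_scorer.RougeScorer(
    rouge_types=["rouge1", "rougeL"],
    use_stemmer=False,
    tokenizer=CallableTokenizer(lambda text: text.split()),
)
scores = scorer.score("the cat sat on the mat", "the cat sat on the mat")
print(scores["rouge1"].fmeasure)  # expected: 1.0
```

The `_compute` change mirrors this: a user-supplied callable is wrapped in `Tokenizer` and handed to `RougeScorer`, while passing `tokenizer=None` keeps the previous default behaviour.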