Datasets: Exr0n/wiki-entity-similarity

Commit: include training pairs (with sampled negative examples)

Files changed:
- link_synonyms-2018-thresh_10.csv → 2018thresh10corpus.csv  +0 -0
- 2018thresh10dev.csv  +3 -0
- 2018thresh10test.csv  +3 -0
- 2018thresh10train.csv  +3 -0
- link_synonyms-2018-thresh_20.csv → 2018thresh20corpus.csv  +0 -0
- 2018thresh20dev.csv  +3 -0
- 2018thresh20test.csv  +3 -0
- 2018thresh20train.csv  +3 -0
- link_synonyms-2018-thresh_5.csv → 2018thresh5corpus.csv  +0 -0
- 2018thresh5dev.csv  +3 -0
- 2018thresh5test.csv  +3 -0
- 2018thresh5train.csv  +3 -0
- generate_wes_data.py  +76 -0
- wiki-entity-similarity.py  +62 -15
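For context, the new pairs configs added here can then be loaded through the datasets library. A minimal usage sketch (not part of this commit; it assumes the loader script behaves as committed below, with columns article, link_text, and is_same):

from datasets import load_dataset

# 'validation' maps onto the new 2018thresh10dev.csv file
ds = load_dataset('Exr0n/wiki-entity-similarity', '2018thresh10pairs', split='validation')
print(ds[0])  # expected keys: 'article', 'link_text', 'is_same'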
 
    	
link_synonyms-2018-thresh_10.csv → 2018thresh10corpus.csv  (RENAMED)
File without changes.
    	
2018thresh10dev.csv  (ADDED)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53c34495315acde41ca549eaa1ad02726e79c6f2aad46b77f84b11fb2459e666
+size 55066171
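The three added lines are a Git LFS pointer: the CSV itself lives in LFS storage, while the pointer records the object's sha256 digest (oid) and byte count (size). The same pattern applies to every CSV added below. As a small sketch (not part of the commit), a downloaded file can be checked against its pointer:

import hashlib

def verify_lfs_object(path, oid, size):
    # hash the file in 1 MiB blocks, then compare digest and length to the pointer
    h, n = hashlib.sha256(), 0
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(1 << 20), b''):
            h.update(block)
            n += len(block)
    return h.hexdigest() == oid and n == size

# e.g. verify_lfs_object('2018thresh10dev.csv',
#          '53c34495315acde41ca549eaa1ad02726e79c6f2aad46b77f84b11fb2459e666',
#          55066171)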
    	
2018thresh10test.csv  (ADDED)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8089f88487de91ed778ecfed210f0f13488e3f670404c210ebd72ee3f1b268de
+size 36685709
    	
2018thresh10train.csv  (ADDED)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2618a03dd0278017b987759f723cc7525d19ddc3c79247a187bcf0a2601663ed
+size 274277317
    	
link_synonyms-2018-thresh_20.csv → 2018thresh20corpus.csv  (RENAMED)
File without changes.
    	
2018thresh20dev.csv  (ADDED)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06c9b05cc92746020c8379d55d8738ca6923129ca8a15766df1687625d73cb30
+size 39942761
    	
2018thresh20test.csv  (ADDED)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:feea8c3d46deb539e8004bc5193a15509cb3b352d93431dbe5131a5e4356b9d9
+size 26669268
    	
2018thresh20train.csv  (ADDED)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebd8c98658d16de99412d9f52d3b98409c0bcefd36409378fe303d5dd61c9eba
+size 198305773
    	
link_synonyms-2018-thresh_5.csv → 2018thresh5corpus.csv  (RENAMED)
File without changes.
    	
2018thresh5dev.csv  (ADDED)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf2e6b8bf0b091f3cbcc3cfbdd831989e35be1f8331de0a2f0d217e574c189c4
+size 71983269
    	
2018thresh5test.csv  (ADDED)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f654c09ae5f8ffbb80ddcb79c2be63e37b8ae184ef99c9555fb1c1625fedf1b7
+size 48195090
    	
2018thresh5train.csv  (ADDED)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:866bad7a6fc1fcd477f87be9b856d289e47547878fcaeb000d1dba768a7b7468
+size 359260993
    	
generate_wes_data.py  (ADDED)
@@ -0,0 +1,76 @@
+from datasets import load_dataset
+import pandas as pd
+import numpy as np
+from tqdm import tqdm
+
+from collections import defaultdict
+from operator import itemgetter as ig
+from itertools import islice, chain, repeat
+from random import sample, choice, shuffle
+from gc import collect
+
+def generate_splits(subset, split=[0.75, 0.15, 0.1]):
+    assert abs(sum(split) - 1.0) < 0.0001
+    # get the data in dictionary form: article -> list of inbound link texts
+    groups = defaultdict(list)
+    ds = load_dataset('Exr0n/wiki-entity-similarity', subset, split='train')
+    ds = list(tqdm(ds, total=len(ds)))
+    for article, link in tqdm(map(ig('article', 'link_text'), ds), total=len(ds)):
+        groups[article].append(link)
+    del ds
+
+    # greedily allocate whole groups to splits: largest group first, each to the split furthest below its target share
+    order = sorted(groups.keys(), reverse=True, key=lambda e: len(groups[e]))
+    splits = [[] for _ in split]
+    sizes = [0.001] * len(split)    # avoid div zero error
+    for group in order:
+        impoverished = np.argmax([ s - (x/sum(sizes)) for x, s in zip(sizes, split) ])
+        splits[impoverished].append(group)
+        sizes[impoverished] += len(groups[group])
+
+    sizes = [ int(x) for x in sizes ]
+    print('final sizes', sizes, [x/sum(sizes) for x in sizes])
+
+    # generate positive examples
+    ret = [ [[(k, t) for t in groups[k]] for k in keys] for keys in splits ]
+
+    # generate negative examples randomly (TODO: probably a more elegant swapping soln)
+    for i, keys in enumerate(splits):
+        for key in keys:
+            try:
+                got = sample(keys, len(groups[key])+1)
+                ret[i].append(
+                    [(key, choice(groups[k])) for k in got if k != key]
+                    [:len(groups[key])]
+                )
+            except ValueError:
+                raise ValueError("well frick one group is bigger than all the others combined. try sampling one at a time")
+
+    collect()
+    return [(chain(*s), chain(repeat(1, z), repeat(0, z))) for z, s in zip(sizes, ret)]
+
+
+if __name__ == '__main__':
+    for size in [5, 10, 20]:
+        x = generate_splits(subset='2018thresh' + str(size) + 'corpus')
+
+        for (data, labels), split in zip(x, ['train', 'dev', 'test']):
+            articles, lts = list(zip(*data))
+            df = pd.DataFrame({ 'article': articles, 'link_text': lts, 'is_same': list(labels) })
+            df = df.sample(frac=1).reset_index(drop=True)
+            df.to_csv('2018thresh' + str(size) + split + '.csv', index=False)
+            # print(df.head(30), df.tail(30))
+
+    # tests
+    # for data, labels in x[2:]:
+    #     data = list(data)
+    #     labels = list(labels)
+    #
+    #     assert sum(labels) * 2 == len(labels)
+    #     num = sum(labels)
+    #
+    #     before = [ a for a, _ in data[:num] ]
+    #     after  = [ a for a, _ in data[num:] ]
+    #     assert before == after
+    #
+    #     print(data[num:])
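To make the two core steps of generate_splits concrete, here is a toy illustration (not in the commit): groups are allocated whole, largest first, to whichever split is furthest below its target share, so every name for an entity lands in exactly one split; negatives then pair an article with link texts sampled from other groups in the same split.

import numpy as np
from random import sample, choice

groups = {'A': ['a1','a2','a3','a4'], 'B': ['b1','b2'], 'C': ['c1','c2'], 'D': ['d1']}
split = [0.75, 0.15, 0.1]

# greedy allocation, largest group first
splits = [[] for _ in split]
sizes = [0.001] * len(split)   # avoid div zero error
for g in sorted(groups, key=lambda e: len(groups[e]), reverse=True):
    i = int(np.argmax([s - x / sum(sizes) for x, s in zip(sizes, split)]))
    splits[i].append(g)
    sizes[i] += len(groups[g])
print(splits)  # [['A', 'D'], ['B'], ['C']]

# negative sampling within one split (the script samples len(groups[key])+1
# keys; simplified here to a permutation of the split's keys)
keys = splits[0]
negs = [('A', choice(groups[k])) for k in sample(keys, len(keys)) if k != 'A']
print(negs)    # [('A', 'd1')]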
    	
wiki-entity-similarity.py  (CHANGED)
@@ -12,33 +12,67 @@ _CITE = '''\
 }
 '''
 
+_HUGGINGFACE_REPO = "https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/"
+
 @dataclass
 class WikiEntitySimilarityConfig(datasets.BuilderConfig):
     """BuilderConfig for CSV."""
+    year: int = None
+    type: str = None
     threshhold: int = None
-    path: str = None
+    # path: str = None
 
 class WikiEntitySimilarity(datasets.GeneratorBasedBuilder):
     """WES: Learning semantic similarity from 6M names for 1M entities"""
     BUILDER_CONFIG_CLASS = WikiEntitySimilarityConfig
     BUILDER_CONFIGS = [
         WikiEntitySimilarityConfig(
-            name='…
-            description='min 5 inbound links, lowest quality',
+            name='2018thresh5corpus',
+            description='raw link corpus (all true): min 5 inbound links, lowest quality',
+            year=2018,
+            type='corpus',
             threshhold=5,
-            path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/…
+            # path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/link_synonyms-2018-thresh_5.csv"
         ),
         WikiEntitySimilarityConfig(
-            name='…
-            description='min 10 inbound links, medium quality',
+            name='2018thresh10corpus',
+            description='raw link corpus (all true): min 10 inbound links, medium quality',
+            year=2018,
+            type='corpus',
             threshhold=10,
-            path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/…
+            # path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/link_synonyms-2018-thresh_10.csv"
         ),
         WikiEntitySimilarityConfig(
-            name='…
-            description='min 20 inbound links, high quality',
+            name='2018thresh20corpus',
+            description='raw link corpus (all true): min 20 inbound links, high quality',
+            year=2018,
+            type='corpus',
             threshhold=20,
-            path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/…
+            # path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/link_synonyms-2018-thresh_20.csv"
         ),
+        WikiEntitySimilarityConfig(
+            name='2018thresh5pairs',
+            description='training pairs based on min 5 inbound links, lowest quality',
+            year=2018,
+            type='pairs',
+            threshhold=5,
+            # path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/2018thresh5"
+        ),
+        WikiEntitySimilarityConfig(
+            name='2018thresh10pairs',
+            description='training pairs based on min 10 inbound links, medium quality',
+            year=2018,
+            type='pairs',
+            threshhold=10,
+            # path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/2018thresh10"
+        ),
+        WikiEntitySimilarityConfig(
+            name='2018thresh20pairs',
+            description='training pairs based on min 20 inbound links, high quality',
+            year=2018,
+            type='pairs',
+            threshhold=20,
+            # path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/2018thresh20"
+        ),
     ]
 
@@ -56,12 +90,25 @@ class WikiEntitySimilarity(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        …
-        …
-        …
+        path = _HUGGINGFACE_REPO + f"{self.config.year}thresh{self.config.threshhold}"
+        if self.config.type == 'corpus':
+            filepath = dl_manager.download(path + 'corpus.csv')
+            return [ datasets.SplitGenerator(name=datasets.Split.TRAIN,
+                                             gen_kwargs={ 'path': filepath }) ]
+        elif self.config.type == 'pairs':
+            ret = []
+            for n, e in zip(['train', 'dev', 'test'],
+                            [datasets.Split.TRAIN,
+                             datasets.Split.VALIDATION,
+                             datasets.Split.TEST]):
+                fp = dl_manager.download(path + n + '.csv')
+                ret.append( datasets.SplitGenerator(name=e, gen_kwargs={ 'path': fp }) )
+            return ret
+        else:
+            raise ValueError(f"invalid dataset type '{self.config.type}', expected 'corpus' for raw links or 'pairs' for trainable pairs with negative examples")
 
-    def _generate_examples(self, …
-        with open(…
+    def _generate_examples(self, path):
+        with open(path, 'r') as rf:
             reader = csv.DictReader(rf)
             for i, row in enumerate(reader):
                 yield i, row
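For reference, the URLs the new _split_generators resolves per config type (illustrative sketch derived from the hunk above):

_HUGGINGFACE_REPO = "https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/"

for year, thresh, typ in [(2018, 10, 'corpus'), (2018, 10, 'pairs')]:
    base = _HUGGINGFACE_REPO + f"{year}thresh{thresh}"
    if typ == 'corpus':
        print([base + 'corpus.csv'])     # single TRAIN split
    else:
        print([base + n + '.csv' for n in ['train', 'dev', 'test']])
# corpus -> ['.../2018thresh10corpus.csv']
# pairs  -> ['.../2018thresh10train.csv', '.../2018thresh10dev.csv', '.../2018thresh10test.csv']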