init
- .gitattributes +2 -0
 - .gitignore +1 -0
 - README.md +70 -0
 - data/train.jsonl +3 -0
 - data/valid.jsonl +3 -0
 - process.py +103 -0
 - semeval2012_relational_similarity.py +81 -0
 
    	
.gitattributes
CHANGED
@@ -52,3 +52,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+data/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/valid.jsonl filter=lfs diff=lfs merge=lfs -text
    	
.gitignore
ADDED
@@ -0,0 +1 @@
cache
    	
README.md
CHANGED
@@ -0,0 +1,70 @@
---
language:
- en
license:
- other
multilinguality:
- monolingual
pretty_name: SemEval2012 task 2 Relational Similarity
---
# Dataset Card for "relbert/semeval2012_relational_similarity"
## Dataset Description
- **Repository:** [RelBERT](https://github.com/asahi417/relbert)
- **Paper:** [https://aclanthology.org/S12-1047/](https://aclanthology.org/S12-1047/)
- **Dataset:** SemEval2012: Relational Similarity

### Dataset Summary
A relational similarity dataset from [SemEval2012 task 2](https://aclanthology.org/S12-1047/), compiled to fine-tune the [RelBERT](https://github.com/asahi417/relbert) model.
The dataset contains lists of positive and negative word pairs for 89 pre-defined relation types.
The relation types are constructed on top of the following 10 parent relation types.
```python
{
    1: "Class Inclusion",  # Hypernym
    2: "Part-Whole",  # Meronym, Substance Meronym
    3: "Similar",  # Synonym, Co-hyponym
    4: "Contrast",  # Antonym
    5: "Attribute",  # Attribute, Event
    6: "Non Attribute",
    7: "Case Relation",
    8: "Cause-Purpose",
    9: "Space-Time",
    10: "Representation"
}
```
Each parent relation is further divided into child relation types; their definitions can be found [here](https://drive.google.com/file/d/0BzcZKTSeYL8VenY0QkVpZVpxYnc/view?resourcekey=0-ZP-UARfJj39PcLroibHPHw).
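As an illustration, a child relation ID such as `8d` can be traced back to its parent category from its leading digits. The sketch below is not part of this repository; the `parent_relation` helper is hypothetical and assumes IDs are formed as a parent number followed by a child letter (e.g. `1a` through `10f`):

```python
import re

# Parent relation types, as listed in the dict above.
PARENT_RELATIONS = {
    1: "Class Inclusion", 2: "Part-Whole", 3: "Similar", 4: "Contrast",
    5: "Attribute", 6: "Non Attribute", 7: "Case Relation",
    8: "Cause-Purpose", 9: "Space-Time", 10: "Representation",
}


def parent_relation(relation_type: str) -> str:
    """Map a child relation ID (e.g. '8d') to its parent relation name (assumed naming scheme)."""
    match = re.match(r"^(\d+)([a-z]+)$", relation_type)
    assert match is not None, f"unexpected relation ID: {relation_type}"
    return PARENT_RELATIONS[int(match.group(1))]


print(parent_relation("8d"))  # -> "Cause-Purpose"
```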
## Dataset Structure
### Data Instances
An example of `train` looks as follows.
```
{
  'relation_type': '8d',
  'positives': [ [ "breathe", "live" ], [ "study", "learn" ], [ "speak", "communicate" ], ... ],
  'negatives': [ [ "starving", "hungry" ], [ "clean", "bathe" ], [ "hungry", "starving" ], ... ]
}
```
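The dataset can be loaded through the Hugging Face `datasets` library; a minimal usage sketch, assuming the dataset is published as `relbert/semeval2012_relational_similarity`:

```python
from datasets import load_dataset

# Minimal usage sketch; assumes the `datasets` library is installed and the
# dataset is available on the Hub under "relbert/semeval2012_relational_similarity".
dataset = load_dataset("relbert/semeval2012_relational_similarity")

sample = dataset["train"][0]
print(sample["relation_type"])   # a child relation ID, e.g. "8d"
print(sample["positives"][:2])   # first two positive word pairs
print(sample["negatives"][:2])   # first two negative word pairs
```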
### Data Splits
|  name   |train|validation|
|---------|----:|---------:|
|semeval2012_relational_similarity| 89 |      89|

Each split contains one record per relation type.

### Citation Information
```
@inproceedings{jurgens-etal-2012-semeval,
    title = "{S}em{E}val-2012 Task 2: Measuring Degrees of Relational Similarity",
    author = "Jurgens, David  and
      Mohammad, Saif  and
      Turney, Peter  and
      Holyoak, Keith",
    booktitle = "*{SEM} 2012: The First Joint Conference on Lexical and Computational Semantics {--} Volume 1: Proceedings of the main conference and the shared task, and Volume 2: Proceedings of the Sixth International Workshop on Semantic Evaluation ({S}em{E}val 2012)",
    month = "7-8 " # jun,
    year = "2012",
    address = "Montr{\'e}al, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/S12-1047",
    pages = "356--364",
}
```
    	
data/train.jsonl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:82446ec2cb00f4d66ad37b699a404b5980d366488e49b23a9f55e3dcc3f604af
size 32277
    	
data/valid.jsonl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7093b17813ae66c8206d9dcf1a6757376db8811956ee350ea1669a4b2313725b
size 11205
    	
process.py
ADDED
@@ -0,0 +1,103 @@
import json
import os
import tarfile
import zipfile
import gzip
import requests
import gdown
from glob import glob


def wget(url, cache_dir: str = './cache', gdrive_filename: str = None):
    """ wget and uncompress data_iterator """
    path = _wget(url, cache_dir, gdrive_filename=gdrive_filename)
    if path.endswith('.tar.gz') or path.endswith('.tgz') or path.endswith('.tar'):
        if path.endswith('.tar'):
            tar = tarfile.open(path)
        else:
            tar = tarfile.open(path, "r:gz")
        tar.extractall(cache_dir)
        tar.close()
        os.remove(path)
    elif path.endswith('.zip'):
        with zipfile.ZipFile(path, 'r') as zip_ref:
            zip_ref.extractall(cache_dir)
        os.remove(path)
    elif path.endswith('.gz'):
        with gzip.open(path, 'rb') as f:
            with open(path.replace('.gz', ''), 'wb') as f_write:
                f_write.write(f.read())
        os.remove(path)


def _wget(url: str, cache_dir, gdrive_filename: str = None):
    """ get data from web """
    os.makedirs(cache_dir, exist_ok=True)
    if url.startswith('https://drive.google.com'):
        assert gdrive_filename is not None, 'please provide a filename for gdrive download'
        return gdown.download(url, f'{cache_dir}/{gdrive_filename}', quiet=False)
    filename = os.path.basename(url)
    with open(f'{cache_dir}/{filename}', "wb") as f:
        r = requests.get(url)
        f.write(r.content)
    return f'{cache_dir}/{filename}'


def get_data(n_sample: int = 10, v_rate: float = 0.2, n_sample_max: int = 10):
    """ Build positive/negative word-pair lists per relation and split them into train/validation. """
    assert n_sample <= n_sample_max
    cache_dir = 'cache'
    os.makedirs(cache_dir, exist_ok=True)

    path_answer = f'{cache_dir}/Phase2Answers'
    path_scale = f'{cache_dir}/Phase2AnswersScaled'
    url = 'https://drive.google.com/u/0/uc?id=0BzcZKTSeYL8VYWtHVmxUR3FyUmc&export=download'
    filename = 'SemEval-2012-Platinum-Ratings.tar.gz'
    if not (os.path.exists(path_scale) and os.path.exists(path_answer)):
        wget(url, gdrive_filename=filename, cache_dir=cache_dir)
    files_answer = [os.path.basename(i) for i in glob(f'{path_answer}/*.txt')]
    files_scale = [os.path.basename(i) for i in glob(f'{path_scale}/*.txt')]
    assert files_answer == files_scale, f'files are not matched: {files_scale} vs {files_answer}'
    all_positive_v = {}
    all_negative_v = {}
    all_positive_t = {}
    all_negative_t = {}
    for i in files_scale:
        relation_id = i.split('-')[-1].replace('.txt', '')
        with open(f'{path_answer}/{i}', 'r') as f:
            # each Phase2Answers file should contain exactly one relation type
            lines_answer = [l.replace('"', '').split('\t') for l in f.read().split('\n') if not l.startswith('#') and len(l)]
            relation_type = list(set(list(zip(*lines_answer))[-1]))
            assert len(relation_type) == 1, relation_type
        with open(f'{path_scale}/{i}', 'r') as f:
            # parse the scaled score (first 5 characters) and the 'word1:word2' pair, then sort by score (ascending)
            lines_scale = [[float(l[:5]), l[6:].replace('"', '')] for l in f.read().split('\n')
                           if not l.startswith('#') and len(l)]
            lines_scale = sorted(lines_scale, key=lambda x: x[0])
            # lowest-scored pairs (negative score) become negatives, highest-scored pairs (positive score) become positives
            _negative = [tuple(i.split(':')) for i in list(zip(*list(filter(lambda x: x[0] < 0, lines_scale[:n_sample_max]))))[1]]
            _positive = [tuple(i.split(':')) for i in list(zip(*list(filter(lambda x: x[0] > 0, lines_scale[-n_sample_max:]))))[1]]
            # hold out a subset (every k-th pair, k = int(len * (1 - v_rate))) for validation; the rest go to train
            v_negative = _negative[::int(len(_negative) * (1 - v_rate))]
            v_positive = _positive[::int(len(_positive) * (1 - v_rate))]
            t_negative = [i for i in _negative if i not in v_negative]
            t_positive = [i for i in _positive if i not in v_positive]

            all_negative_v[relation_id] = v_negative
            all_positive_v[relation_id] = v_positive
            all_negative_t[relation_id] = t_negative[:n_sample]
            all_positive_t[relation_id] = t_positive[-n_sample:]

    return (all_positive_t, all_negative_t), (all_positive_v, all_negative_v)


if __name__ == '__main__':
    (all_positive_t, all_negative_t), (all_positive_v, all_negative_v) = get_data(n_sample=10, v_rate=0.2, n_sample_max=10)
    os.makedirs('data', exist_ok=True)

    keys = all_positive_t.keys()
    with open("data/train.jsonl", "w") as f:
        for k in sorted(keys):
            f.write(json.dumps({"relation_type": k, "positives": all_positive_t[k], "negatives": all_negative_t[k]}))
            f.write("\n")

    keys = all_positive_v.keys()
    with open("data/valid.jsonl", "w") as f:
        for k in sorted(keys):
            f.write(json.dumps({"relation_type": k, "positives": all_positive_v[k], "negatives": all_negative_v[k]}))
            f.write("\n")
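After running `process.py`, the generated files can be sanity-checked with a short sketch like the one below (not part of this commit; it assumes the script was run from the repository root):

```python
import json

# Sketch of a post-run check: data/train.jsonl and data/valid.jsonl should each
# hold one JSON record per relation type (89 per the dataset card).
for split in ("data/train.jsonl", "data/valid.jsonl"):
    with open(split) as f:
        rows = [json.loads(line) for line in f if line.strip()]
    print(split, len(rows))  # expected: 89 records each
    print(rows[0]["relation_type"], len(rows[0]["positives"]), len(rows[0]["negatives"]))
```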
    	
semeval2012_relational_similarity.py
CHANGED
@@ -0,0 +1,81 @@
import json
import datasets


logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """[SemEVAL 2012 task 2: Relational Similarity](https://aclanthology.org/S12-1047/)"""
_NAME = "semeval2012_relational_similarity"
_VERSION = "0.0.0"
_CITATION = """
@inproceedings{jurgens-etal-2012-semeval,
    title = "{S}em{E}val-2012 Task 2: Measuring Degrees of Relational Similarity",
    author = "Jurgens, David  and
      Mohammad, Saif  and
      Turney, Peter  and
      Holyoak, Keith",
    booktitle = "*{SEM} 2012: The First Joint Conference on Lexical and Computational Semantics {--} Volume 1: Proceedings of the main conference and the shared task, and Volume 2: Proceedings of the Sixth International Workshop on Semantic Evaluation ({S}em{E}val 2012)",
    month = "7-8 " # jun,
    year = "2012",
    address = "Montr{\'e}al, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/S12-1047",
    pages = "356--364",
}
"""

_HOME_PAGE = "https://github.com/asahi417/relbert"
_URL = f'https://huggingface.co/datasets/relbert/{_NAME}/resolve/main/data'
_URLS = {
    str(datasets.Split.TRAIN): [f'{_URL}/train.jsonl'],
    str(datasets.Split.VALIDATION): [f'{_URL}/valid.jsonl'],
}


class SemEVAL2012RelationalSimilarityConfig(datasets.BuilderConfig):
    """BuilderConfig"""

    def __init__(self, **kwargs):
        """BuilderConfig.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(SemEVAL2012RelationalSimilarityConfig, self).__init__(**kwargs)


class SemEVAL2012RelationalSimilarity(datasets.GeneratorBasedBuilder):
    """Dataset."""

    BUILDER_CONFIGS = [
        SemEVAL2012RelationalSimilarityConfig(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION)
    ]

    def _split_generators(self, dl_manager):
        downloaded_file = dl_manager.download_and_extract(_URLS)
        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION]]

    def _generate_examples(self, filepaths):
        _key = 0
        for filepath in filepaths:
            logger.info(f"generating examples from = {filepath}")
            with open(filepath, encoding="utf-8") as f:
                _list = [i for i in f.read().split('\n') if len(i) > 0]
                for i in _list:
                    data = json.loads(i)
                    yield _key, data
                    _key += 1

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "relation_type": datasets.Value("string"),
                    "positives": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                    "negatives": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )
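Before publishing, the loading script can be exercised against the local file; a minimal sketch, assuming the `datasets` library is installed and the data files are reachable at the `_URL` above:

```python
from datasets import load_dataset

# Sketch: point load_dataset at the local loading script to verify the builder
# before pushing the repository.
dataset = load_dataset("./semeval2012_relational_similarity.py")
print(dataset)                                    # expected splits: train and validation
print(dataset["validation"][0]["relation_type"])  # a child relation ID, e.g. "8d"
```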