Commit e36c994
Parent(s): de9348b

add basic configuration and model file

Files changed:
- configuration_bert.py  +203 -0
- modeling_bert.py       +0 -0
- special_tokens_map.json +7 -0
- tokenizer_config.json  +15 -0
- vocab.txt              +0 -0
    	
configuration_bert.py
ADDED
@@ -0,0 +1,203 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT model configuration"""
from collections import OrderedDict
from typing import Mapping

from transformers.configuration_utils import PretrainedConfig
from transformers.onnx import OnnxConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}


class MyBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BertModel`] or a [`TFBertModel`]. It is used to
    instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the BERT
    [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`BertModel`] or [`TFBertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.
        feed_forward_type (`str`, *optional*, defaults to `"original"`):
            The type of feed forward layer to use in the bert layers.
            Can be one of GLU variants, e.g. `"reglu"`, `"geglu"`
        emb_pooler (`str`, *optional*, defaults to `None`):
            The function to use for pooling the last layer embeddings to get the sentence embeddings.
            Should be one of `None`, `"mean"`.

    Examples:

    ```python
    >>> from transformers import BertConfig, BertModel

    >>> # Initializing a BERT bert-base-uncased style configuration
    >>> configuration = BertConfig()

    >>> # Initializing a model (with random weights) from the bert-base-uncased style configuration
    >>> model = BertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        feed_forward_type="original",
        emb_pooler=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.feed_forward_type = feed_forward_type
        self.emb_pooler = emb_pooler


class MyBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
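As a quick illustration of the two fields this commit adds on top of the stock BertConfig (feed_forward_type and emb_pooler), here is a minimal, hypothetical usage sketch; it assumes the file above is importable from the working directory and a transformers version that still ships transformers.onnx:

from configuration_bert import MyBertConfig, MyBertOnnxConfig

# Override the custom fields; "geglu" and "mean" are among the values the
# docstring names for feed_forward_type and emb_pooler respectively.
config = MyBertConfig(
    feed_forward_type="geglu",  # a GLU-variant feed-forward instead of "original"
    emb_pooler="mean",          # pool last hidden states into sentence embeddings
)
print(config.feed_forward_type, config.emb_pooler)  # geglu mean

# The ONNX config declares dynamic batch/sequence axes for the three model inputs.
onnx_config = MyBertOnnxConfig(config)
print(onnx_config.inputs)  # input_ids / attention_mask / token_type_ids -> axes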
    	
modeling_bert.py
ADDED
The diff for this file is too large to render; see the raw diff.
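Since modeling_bert.py is not rendered here, the emb_pooler="mean" behavior documented in the configuration can only be sketched. The function below is the standard masked mean-pooling formulation commonly used for sentence embeddings; it is an assumption about what the model applies to its last hidden states, not code taken from this commit:

import torch

def mean_pool(last_hidden_state: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Masked mean over the sequence axis, so padding positions contribute nothing.

    A common formulation for emb_pooler="mean"; the actual implementation in
    modeling_bert.py is not shown in this diff, so treat this as a sketch.
    """
    mask = attention_mask.unsqueeze(-1).to(last_hidden_state.dtype)  # (B, T, 1)
    summed = (last_hidden_state * mask).sum(dim=1)                   # (B, H)
    counts = mask.sum(dim=1).clamp(min=1e-9)                         # (B, 1)
    return summed / counts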
    	
special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
{
  "cls_token": "[CLS]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}
    	
tokenizer_config.json
ADDED
@@ -0,0 +1,15 @@
{
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "mask_token": "[MASK]",
  "model_max_length": 8192,
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
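To sanity-check the two JSON files above, a tokenizer can be loaded straight from a local checkout of this repo; BertTokenizer.from_pretrained picks up vocab.txt, tokenizer_config.json, and special_tokens_map.json automatically. The "." path is a placeholder for wherever the files live:

from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained(".")  # placeholder local path

print(tokenizer.model_max_length)                # 8192, per tokenizer_config.json
print(tokenizer.cls_token, tokenizer.sep_token)  # [CLS] [SEP]

# Encoding wraps text with the special tokens from special_tokens_map.json,
# e.g. ['[CLS]', 'hello', 'world', '[SEP]'] with a standard uncased vocab.
ids = tokenizer("hello world")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))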
    	
vocab.txt
ADDED
The diff for this file is too large to render; see the raw diff.