MohamedBayan
committed on
Commit
·
d92ef04
1
Parent(s):
89d021c
Add Hindi Native datasets
Browse files- Hindi-Hostility-Detection-CONSTRAINT-2021/dev.json +3 -0
- Hindi-Hostility-Detection-CONSTRAINT-2021/test.json +3 -0
- Hindi-Hostility-Detection-CONSTRAINT-2021/train.json +3 -0
- MC_Hinglish1/dev.json +3 -0
- MC_Hinglish1/test.json +3 -0
- MC_Hinglish1/train.json +3 -0
- Natural Language Inference/dev.json +3 -0
- Natural Language Inference/test.json +3 -0
- Natural Language Inference/train.json +3 -0
- Offensive Speech Detection/dev.json +3 -0
- Offensive Speech Detection/test.json +3 -0
- Offensive Speech Detection/train.json +3 -0
- README.md +254 -0
- Sentiment Analysis/dev.json +3 -0
- Sentiment Analysis/test.json +3 -0
- Sentiment Analysis/train.json +3 -0
- capablities_tasks_datasets.png +3 -0
- fake-news/dev.json +3 -0
- fake-news/test.json +3 -0
- fake-news/train.json +3 -0
- hate-speech-detection/dev.json +3 -0
- hate-speech-detection/test.json +3 -0
- hate-speech-detection/train.json +3 -0
- readme_hindi.yaml +3 -0
- xlsum/dev.json +3 -0
- xlsum/test.json +3 -0
- xlsum/train.json +3 -0
Hindi-Hostility-Detection-CONSTRAINT-2021/dev.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d224d0cc6669accd47fd3725563275c401f16a73630aacaa277c5b41509eb2de
|
3 |
+
size 661031
|
Hindi-Hostility-Detection-CONSTRAINT-2021/test.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:20430461520fa771e31480a6b1d29d70d4315b8b64132b3ab264d8c8f364b44a
|
3 |
+
size 1331632
|
Hindi-Hostility-Detection-CONSTRAINT-2021/train.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f46df3609935db5cd3167e9cb709a361831f11605fc94d202643546d6aab7d0e
|
3 |
+
size 4652254
|
MC_Hinglish1/dev.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:1dd71941cbae94388b7c865047d2d8e55a924900ff9f88478e10981a1ff4b608
|
3 |
+
size 1342872
|
MC_Hinglish1/test.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5a485d554f8dd3d6cecea9ebbbd3293a759d6b6523b0c37de719f0a340fe7e23
|
3 |
+
size 546442
|
MC_Hinglish1/train.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:76817514321676e1a5970f55aa914e947c0b7cb1550d6bcbb201b40eaf3b5d59
|
3 |
+
size 3084380
|
Natural Language Inference/dev.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8ea5dae1901af101990472231b88dc991df3fd06bb97581f3d3cb49b8b9e2820
|
3 |
+
size 539984
|
Natural Language Inference/test.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8f48b2b2cf852c7dfef459c7ca41b17b6da5007918489211cf2f60beca79a813
|
3 |
+
size 458851
|
Natural Language Inference/train.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:16d78b7bd2120016922a00d3330e65e66f237dca449ce44fd8786eb85a9277d6
|
3 |
+
size 1273478
|
Offensive Speech Detection/dev.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:54af52b398f639c834e5058974c4b9193f6848f37fdb711861814e54f190d18e
|
3 |
+
size 203989
|
Offensive Speech Detection/test.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5a28072c589f2f7368277ef386ad3dfee8558f3447a28c5ed59a98f130cd4c9c
|
3 |
+
size 409714
|
Offensive Speech Detection/train.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:18b5f3e809c28cc4c91816617a58c9c4ab35704b87b9203727a7a5f9d0cb85fa
|
3 |
+
size 1393033
|
README.md
ADDED
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
license: cc-by-nc-sa-4.0
|
3 |
+
task_categories:
|
4 |
+
- text-classification
|
5 |
+
language:
|
6 |
+
- hi
|
7 |
+
tags:
|
8 |
+
- Social Media
|
9 |
+
- News Media
|
10 |
+
- Sentiment
|
11 |
+
- Stance
|
12 |
+
- Emotion
|
13 |
+
pretty_name: "LlamaLens: Specialized Multilingual LLM for Analyzing News and Social Media Content -- Hindi"
|
14 |
+
size_categories:
|
15 |
+
- 10K<n<100K
|
16 |
+
dataset_info:
|
17 |
+
- config_name: Sentiment_Analysis
|
18 |
+
splits:
|
19 |
+
- name: train
|
20 |
+
num_examples: 10039
|
21 |
+
- name: dev
|
22 |
+
num_examples: 1258
|
23 |
+
- name: test
|
24 |
+
num_examples: 1259
|
25 |
+
- config_name: MC_Hinglish1
|
26 |
+
splits:
|
27 |
+
- name: train
|
28 |
+
num_examples: 5177
|
29 |
+
- name: dev
|
30 |
+
num_examples: 2219
|
31 |
+
- name: test
|
32 |
+
num_examples: 1000
|
33 |
+
- config_name: Offensive_Speech_Detection
|
34 |
+
splits:
|
35 |
+
- name: train
|
36 |
+
num_examples: 2172
|
37 |
+
- name: dev
|
38 |
+
num_examples: 318
|
39 |
+
- name: test
|
40 |
+
num_examples: 636
|
41 |
+
- config_name: xlsum
|
42 |
+
splits:
|
43 |
+
- name: train
|
44 |
+
num_examples: 70754
|
45 |
+
- name: dev
|
46 |
+
num_examples: 8847
|
47 |
+
- name: test
|
48 |
+
num_examples: 8847
|
49 |
+
- config_name: Hindi-Hostility-Detection-CONSTRAINT-2021
|
50 |
+
splits:
|
51 |
+
- name: train
|
52 |
+
num_examples: 5718
|
53 |
+
- name: dev
|
54 |
+
num_examples: 811
|
55 |
+
- name: test
|
56 |
+
num_examples: 1651
|
57 |
+
- config_name: hate-speech-detection
|
58 |
+
splits:
|
59 |
+
- name: train
|
60 |
+
num_examples: 3327
|
61 |
+
- name: dev
|
62 |
+
num_examples: 476
|
63 |
+
- name: test
|
64 |
+
num_examples: 951
|
65 |
+
- config_name: fake-news
|
66 |
+
splits:
|
67 |
+
- name: train
|
68 |
+
num_examples: 8393
|
69 |
+
- name: dev
|
70 |
+
num_examples: 1417
|
71 |
+
- name: test
|
72 |
+
num_examples: 2743
|
73 |
+
- config_name: Natural_Language_Inference
|
74 |
+
splits:
|
75 |
+
- name: train
|
76 |
+
num_examples: 1251
|
77 |
+
- name: dev
|
78 |
+
num_examples: 537
|
79 |
+
- name: test
|
80 |
+
num_examples: 447
|
81 |
+
configs:
|
82 |
+
- config_name: Sentiment_Analysis
|
83 |
+
data_files:
|
84 |
+
- split: test
|
85 |
+
path: Sentiment_Analysis/test.json
|
86 |
+
- split: dev
|
87 |
+
path: Sentiment_Analysis/dev.json
|
88 |
+
- split: train
|
89 |
+
path: Sentiment_Analysis/train.json
|
90 |
+
- config_name: MC_Hinglish1
|
91 |
+
data_files:
|
92 |
+
- split: test
|
93 |
+
path: MC_Hinglish1/test.json
|
94 |
+
- split: dev
|
95 |
+
path: MC_Hinglish1/dev.json
|
96 |
+
- split: train
|
97 |
+
path: MC_Hinglish1/train.json
|
98 |
+
- config_name: Offensive_Speech_Detection
|
99 |
+
data_files:
|
100 |
+
- split: test
|
101 |
+
path: Offensive_Speech_Detection/test.json
|
102 |
+
- split: dev
|
103 |
+
path: Offensive_Speech_Detection/dev.json
|
104 |
+
- split: train
|
105 |
+
path: Offensive_Speech_Detection/train.json
|
106 |
+
- config_name: xlsum
|
107 |
+
data_files:
|
108 |
+
- split: test
|
109 |
+
path: xlsum/test.json
|
110 |
+
- split: dev
|
111 |
+
path: xlsum/dev.json
|
112 |
+
- split: train
|
113 |
+
path: xlsum/train.json
|
114 |
+
- config_name: Hindi-Hostility-Detection-CONSTRAINT-2021
|
115 |
+
data_files:
|
116 |
+
- split: test
|
117 |
+
path: Hindi-Hostility-Detection-CONSTRAINT-2021/test.json
|
118 |
+
- split: dev
|
119 |
+
path: Hindi-Hostility-Detection-CONSTRAINT-2021/dev.json
|
120 |
+
- split: train
|
121 |
+
path: Hindi-Hostility-Detection-CONSTRAINT-2021/train.json
|
122 |
+
- config_name: hate-speech-detection
|
123 |
+
data_files:
|
124 |
+
- split: test
|
125 |
+
path: hate-speech-detection/test.json
|
126 |
+
- split: dev
|
127 |
+
path: hate-speech-detection/dev.json
|
128 |
+
- split: train
|
129 |
+
path: hate-speech-detection/train.json
|
130 |
+
- config_name: fake-news
|
131 |
+
data_files:
|
132 |
+
- split: test
|
133 |
+
path: fake-news/test.json
|
134 |
+
- split: dev
|
135 |
+
path: fake-news/dev.json
|
136 |
+
- split: train
|
137 |
+
path: fake-news/train.json
|
138 |
+
- config_name: Natural_Language_Inference
|
139 |
+
data_files:
|
140 |
+
- split: test
|
141 |
+
path: Natural_Language_Inference/test.json
|
142 |
+
- split: dev
|
143 |
+
path: Natural_Language_Inference/dev.json
|
144 |
+
- split: train
|
145 |
+
path: Natural_Language_Inference/train.json
|
146 |
+
---
|
147 |
+
|
148 |
+
# LlamaLens: Specialized Multilingual LLM Dataset
|
149 |
+
|
150 |
+
## Overview
|
151 |
+
|
152 |
+
LlamaLens is a specialized multilingual LLM designed for analyzing news and social media content. It focuses on 18 NLP tasks, leveraging 52 datasets across Arabic, English, and Hindi.
|
153 |
+
|
154 |
+
<p align="center"> <img src="https://huggingface.co/datasets/QCRI/LlamaLens-Arabic/resolve/main/capablities_tasks_datasets.png" style="width: 40%;" id="title-icon"> </p>
|
155 |
+
|
156 |
+
## LlamaLens
|
157 |
+
|
158 |
+
This repo includes scripts needed to run our full pipeline, including data preprocessing and sampling, instruction dataset creation, model fine-tuning, inference and evaluation.
|
159 |
+
|
160 |
+
### Features
|
161 |
+
|
162 |
+
- Multilingual support (Arabic, English, Hindi)
|
163 |
+
- 18 NLP tasks with 52 datasets
|
164 |
+
- Optimized for news and social media content analysis
|
165 |
+
|
166 |
+
## 📂 Dataset Overview
|
167 |
+
|
168 |
+
### Hindi Datasets
|
169 |
+
|
170 |
+
| **Task** | **Dataset** | **# Labels** | **# Train** | **# Test** | **# Dev** |
|
171 |
+
| -------------------------- | ----------------------------------------- | ------------ | ----------- | ---------- | --------- |
|
172 |
+
| Cyberbullying | MC-Hinglish1.0 | 7 | 7,400 | 1,000 | 2,119 |
|
173 |
+
| Factuality | fake-news | 2 | 8,393 | 2,743 | 1,417 |
|
174 |
+
| Hate Speech | hate-speech-detection | 2 | 3,327 | 951 | 476 |
|
175 |
+
| Hate Speech | Hindi-Hostility-Detection-CONSTRAINT-2021 | 15 | 5,718 | 1,651 | 811 |
|
176 |
+
| Natural_Language_Inference | Natural_Language_Inference | 2 | 1,251 | 447 | 537 |
|
177 |
+
| Summarization | xlsum | -- | 70,754 | 8,847 | 8,847 |
|
178 |
+
| Offensive Speech | Offensive_Speech_Detection | 3 | 2,172 | 636 | 318 |
|
179 |
+
| Sentiment | Sentiment_Analysis | 3 | 10,039 | 1,259 | 1,258 |
|
180 |
+
|
181 |
+
---
|
182 |
+
|
183 |
+
## Results
|
184 |
+
|
185 |
+
Below, we present the performance of **L-Lens: LlamaLens** , where *"Eng"* refers to the English-instructed model and *"Native"* refers to the model trained with native language instructions. The results are compared against the SOTA (where available) and the Base: **Llama-Instruct 3.1 baseline**. The **Δ** (Delta) column indicates the difference between LlamaLens and the SOTA performance, calculated as (LlamaLens – SOTA).
|
186 |
+
|
187 |
+
|
188 |
+
---
|
189 |
+
| **Task** | **Dataset** | **Metric** | **SOTA** | **Base** | **L-Lens-Eng** | **L-Lens-Native** | **Δ (L-Lens (Eng) - SOTA)** |
|
190 |
+
|:----------------------------------:|:--------------------------------------------:|:----------:|:--------:|:---------------------:|:---------------------:|:--------------------:|:------------------------:|
|
191 |
+
| Factuality | fake-news | Mi-F1 | -- | 0.759 | 0.994 | 0.993 | -- |
|
192 |
+
| Hate Speech Detection | hate-speech-detection | Mi-F1 | 0.639 | 0.750 | 0.963 | 0.963 | 0.324 |
|
193 |
+
| Hate Speech Detection | Hindi-Hostility-Detection-CONSTRAINT-2021 | W-F1 | 0.841 | 0.469 | 0.753 | 0.753 | -0.088 |
|
194 |
+
| Natural Language Inference | Natural Language Inference | W-F1 | 0.646 | 0.633 | 0.568 | 0.679 | -0.078 |
|
195 |
+
| News Summarization | xlsum | R-2 | 0.136 | 0.078 | 0.171 | 0.170 | 0.035 |
|
196 |
+
| Offensive Language Detection | Offensive Speech Detection | Mi-F1 | 0.723 | 0.621 | 0.862 | 0.865 | 0.139 |
|
197 |
+
| Cyberbullying Detection | MC_Hinglish1 | Acc | 0.609 | 0.233 | 0.625 | 0.627 | 0.016 |
|
198 |
+
| Sentiment Classification | Sentiment Analysis | Acc | 0.697 | 0.552 | 0.647 | 0.654 | -0.050
|
199 |
+
|
200 |
+
|
201 |
+
## File Format
|
202 |
+
|
203 |
+
Each JSONL file in the dataset follows a structured format with the following fields:
|
204 |
+
|
205 |
+
- `id`: Unique identifier for each data entry.
|
206 |
+
- `original_id`: Identifier from the original dataset, if available.
|
207 |
+
- `input`: The original text that needs to be analyzed.
|
208 |
+
- `output`: The label assigned to the text after analysis.
|
209 |
+
- `dataset`: Name of the dataset the entry belongs to.
|
210 |
+
- `task`: The specific task type.
|
211 |
+
- `lang`: The language of the input text.
|
212 |
+
- `instructions`: A brief set of instructions describing how the text should be labeled.
|
213 |
+
|
214 |
+
**Example entry in JSONL file:**
|
215 |
+
|
216 |
+
```
|
217 |
+
{
|
218 |
+
"id": "5486ee85-4a70-4b33-8711-fb2a0b6d81e1",
|
219 |
+
"original_id": null,
|
220 |
+
"input": "आप और बाकी सभी मुसलमान समाज के लिए आशीर्वाद हैं.",
|
221 |
+
"output": "not-hateful",
|
222 |
+
"dataset": "hate-speech-detection",
|
223 |
+
"task": "Factuality",
|
224 |
+
"lang": "hi",
|
225 |
+
"instructions": "Classify the given text as either 'not-hateful' or 'hateful'. Return only the label without any explanation, justification, or additional text."
|
226 |
+
}
|
227 |
+
|
228 |
+
```
|
229 |
+
## Model
|
230 |
+
[**LlamaLens on Hugging Face**](https://huggingface.co/QCRI/LlamaLens)
|
231 |
+
|
232 |
+
## Replication Scripts
|
233 |
+
[**LlamaLens GitHub Repository**](https://github.com/firojalam/LlamaLens)
|
234 |
+
|
235 |
+
|
236 |
+
## 📢 Citation
|
237 |
+
|
238 |
+
If you use this dataset, please cite our [paper](https://arxiv.org/pdf/2410.15308):
|
239 |
+
|
240 |
+
```
|
241 |
+
@article{kmainasi2024llamalensspecializedmultilingualllm,
|
242 |
+
title={LlamaLens: Specialized Multilingual LLM for Analyzing News and Social Media Content},
|
243 |
+
author={Mohamed Bayan Kmainasi and Ali Ezzat Shahroor and Maram Hasanain and Sahinur Rahman Laskar and Naeemul Hassan and Firoj Alam},
|
244 |
+
year={2024},
|
245 |
+
journal={arXiv preprint arXiv:2410.15308},
|
246 |
+
volume={},
|
247 |
+
number={},
|
248 |
+
pages={},
|
249 |
+
url={https://arxiv.org/abs/2410.15308},
|
250 |
+
eprint={2410.15308},
|
251 |
+
archivePrefix={arXiv},
|
252 |
+
primaryClass={cs.CL}
|
253 |
+
}
|
254 |
+
```
|
Sentiment Analysis/dev.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:09a09e08f01e59faa7adf80f1f3d0f80f68e047e8a631d863b326fae6d1e5c20
|
3 |
+
size 730870
|
Sentiment Analysis/test.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:70f17a3c0764f70022c5afa9ddf1b49ad5ea7a2bd3ab7fb703ff44a1d5a7ba26
|
3 |
+
size 732090
|
Sentiment Analysis/train.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:85c1ae4363e3bd52fcf8b1e3d6b5d8ac70ab73dbb39017a2c2ec2e2907565072
|
3 |
+
size 5882692
|
capablities_tasks_datasets.png
ADDED
![]() |
Git LFS Details
|
fake-news/dev.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:96335d77e7fa9184a224bbe74dedae9d49526b10ff87d69229ef5938fdffffd1
|
3 |
+
size 14920532
|
fake-news/test.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:a185a68a44e305c1b5cb2f6aff3f62f4a6a4728849241e3fa30d483fd98729a0
|
3 |
+
size 28108335
|
fake-news/train.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3389b6be06064b34305539dc1998b475fa497b2cf9cf7450d2159ae57e4af39d
|
3 |
+
size 84928796
|
hate-speech-detection/dev.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:7001e9653fe1aa7ef7b6aab198c8f3847ce3468f613ff134f1bbc94c0a12ba4b
|
3 |
+
size 268053
|
hate-speech-detection/test.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:67db4f7bea945121ac51674a83c4c83b68510467954607114fc81c4cf60efba3
|
3 |
+
size 533616
|
hate-speech-detection/train.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:1b121e85749547a1de93966f0c9bbfc1d8d97ac4efbec8909cf7e0e5d0b70d0f
|
3 |
+
size 1863923
|
readme_hindi.yaml
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:cf702407954f16601d0de811021136c30e77917e3347780c7647107a20375f9e
|
3 |
+
size 3248
|
xlsum/dev.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:699c9adb1b430058c8fc728fc2427cc2a434ca21fc3d7506a5b920d91572de7b
|
3 |
+
size 59520290
|
xlsum/test.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f4b1818f7f4f00b61ab7f93af0ee12ddfda06d8acac067e7c841b4ed1f815b80
|
3 |
+
size 59846969
|
xlsum/train.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:87f18322d66702c981d8e31415f44d699c97c7a24d4fa4b2e812cef13375e21b
|
3 |
+
size 574606334
|