Create Hugging Face compatible dataset format and update README
- README.md +85 -18
- create_hf_dataset.py +164 -0
README.md
CHANGED
@@ -14,11 +14,12 @@ task_categories:
 - image-classification
 task_ids:
 - sentiment-classification
+- multi-class-classification
 tags:
-- …
+- advertisement
 - social-media
 - xiaohongshu
-- …
+- redbook
 pretty_name: CHASM - Covert Advertisement on RedNote
 ---

@@ -34,45 +35,111 @@ A dataset containing posts from Xiaohongshu (RedNote) for text classification tasks
 - **Language**: Chinese
 - **License**: MIT
 - **Dataset Size**: Contains two classes (label_0 and label_1)
+- **Splits**: Train (3089 samples), Validation (440 samples), Test (888 samples)

 ### Dataset Structure

+This dataset is available in two formats:
+
+#### 1. Original Directory Structure
+
+The data is organized in the following directory structure:
+
+- `label_0/`: non-advertisement content
+- `label_1/`: advertisement content
+
+Each sample has its own folder (e.g. `train_889/`) containing:
+
+- `data.json`: the text data
+- multiple image files (JPG/WEBP format)
+
+#### 2. Hugging Face Format
+
+For better compatibility with the Hugging Face platform, we also provide a processed format:
+
+- `hf_format/train.csv` and `hf_format/train.json`: training set
+- `hf_format/validation.csv` and `hf_format/validation.json`: validation set
+- `hf_format/test.csv` and `hf_format/test.json`: test set
+- `hf_format/dataset_info.json`: dataset metadata
+
+### Data Fields
+
 Each data sample contains the following fields:

-- `…
-- `…
-- `…
-- `…
-- `…
-- `…
+- `id`: unique identifier of the sample
+- `title`: post title
+- `description`: post description
+- `date`: publication date and location
+- `comments`: list of comments
+- `images`: list of image filenames
+- `label`: classification label (0: non-advertisement, 1: advertisement)

 ### Data Example

 ```json
 {
-  "…
-  "…
-  "…
-  "…
-  "…
+  "id": "train_889",
+  "title": "豆瓣9.2❗️几乎是全程震撼着读完的一本书❗️",
+  "description": "📚《人类新史》\n📝[美]大卫·格雷伯/[英]大卫·温格罗\n\n说实话我几乎是不读社科历史大部头的,一年能读上一本我都想夸自己厉害🤣所以看到这本书的厚度时心里真的发怵,担心自己读不下去,却没想到它出乎意料地好读,才读了百来页我就忍不住先来分享一波了,因为脑袋里信息量爆表了!!...",
+  "date": "编辑于 7 天前 湖南",
+  "comments": ["感谢推荐,已收藏", "你好厉害,能看英文版我看中文版才看了这么点就觉得很费劲", ...],
+  "images": ["1040g008317prti2fk45g48iu7h5portohqvdpdo!nd_dft_wlteh_webp_3_0.jpg", ...],
   "label": 0
 }
 ```

 ## Usage

+Using the original directory format:
+
+```python
+import os
+import json
+from glob import glob
+
+# Iterate over all samples
+for label_dir in ['label_0', 'label_1']:
+    for sample_dir in glob(f"{label_dir}/*"):
+        if os.path.isdir(sample_dir):
+            # Read the data file
+            with open(os.path.join(sample_dir, 'data.json'), 'r', encoding='utf-8') as f:
+                data = json.load(f)
+
+            # Collect the image files (both JPG and WEBP)
+            images = glob(os.path.join(sample_dir, '*.jpg')) + glob(os.path.join(sample_dir, '*.webp'))
+
+            # Process the data...
+```
+
+Using the Hugging Face format:
+
+```python
+import pandas as pd
+
+# Load the data
+train_df = pd.read_csv('hf_format/train.csv')
+val_df = pd.read_csv('hf_format/validation.csv')
+test_df = pd.read_csv('hf_format/test.csv')
+
+# Or use the JSON format
+import json
+with open('hf_format/train.json', 'r', encoding='utf-8') as f:
+    train_data = json.load(f)
+```
+
+Using the Hugging Face datasets library:
+
 ```python
 from datasets import load_dataset

-# …
+# Load the dataset
 dataset = load_dataset("Jingyi77/CHASM-Covert_Advertisement_on_RedNote")

-# …
+# View dataset info
 print(dataset)

-# …
-print(dataset['…
-print(dataset['label_1'][0]) # View first data point from label_1
+# Access the data
+print(dataset['train'][0])  # View the first data point of the training set
 ```

 ## Data Preprocessing
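A side note on the hf_format CSVs described above: pandas writes list-valued columns (`comments`, `images`) as their string repr, so reading the CSVs back requires parsing those columns. The following is a minimal illustrative sketch, not part of this commit; it assumes the `hf_format/` directory sits in the working directory and that the list columns contain no missing values.

```python
import ast
from datasets import load_dataset

# Load the processed CSV splits with the generic csv builder
dataset = load_dataset(
    "csv",
    data_files={
        "train": "hf_format/train.csv",
        "validation": "hf_format/validation.csv",
        "test": "hf_format/test.csv",
    },
)

# CSV stores the list-valued fields as strings like "['a', 'b']";
# parse them back into real Python lists
def parse_lists(example):
    example["comments"] = ast.literal_eval(example["comments"])
    example["images"] = ast.literal_eval(example["images"])
    return example

dataset = dataset.map(parse_lists)
print(dataset["train"][0]["label"])
```

The JSON files avoid this round-trip issue entirely, which is one reason both formats are provided.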
create_hf_dataset.py
ADDED
@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+import os
+import json
+import pandas as pd
+from pathlib import Path
+from tqdm import tqdm
+
+def convert_to_hf_format():
+    """Convert the existing directory structure into a Hugging Face compatible format."""
+    print("Starting dataset format conversion...")
+
+    # Dataset path
+    dataset_path = "/Users/zhengjingyi/Desktop/image_after"
+
+    # Create the output directory
+    output_dir = os.path.join(dataset_path, "hf_format")
+    os.makedirs(output_dir, exist_ok=True)
+
+    # Create empty lists for the train, validation, and test splits
+    train_data = []
+    val_data = []
+    test_data = []
+
+    # Process the label_0 directory
+    label_0_path = os.path.join(dataset_path, "label_0")
+    print("Processing the label_0 directory...")
+    for folder in tqdm(os.listdir(label_0_path)):
+        folder_path = os.path.join(label_0_path, folder)
+        if not os.path.isdir(folder_path):
+            continue
+
+        # Read the data file
+        data_file = os.path.join(folder_path, "data.json")
+        if not os.path.exists(data_file):
+            continue
+
+        try:
+            with open(data_file, 'r', encoding='utf-8') as f:
+                data = json.load(f)
+
+            # Get the list of image files
+            image_files = [f for f in os.listdir(folder_path) if f.endswith('.jpg') or f.endswith('.webp')]
+
+            # Create the sample
+            sample = {
+                'id': folder,
+                'title': data.get('title', ''),
+                'description': data.get('description', ''),
+                'date': data.get('date', ''),
+                'comments': data.get('comments', []),
+                'images': image_files,
+                'label': 0
+            }
+
+            # Assign to the appropriate split based on the folder name
+            if folder.startswith('train'):
+                train_data.append(sample)
+            elif folder.startswith('val'):
+                val_data.append(sample)
+            elif folder.startswith('test'):
+                test_data.append(sample)
+        except Exception as e:
+            print(f"Error while processing {folder}: {str(e)}")
+
+    # Process the label_1 directory
+    label_1_path = os.path.join(dataset_path, "label_1")
+    print("Processing the label_1 directory...")
+    for folder in tqdm(os.listdir(label_1_path)):
+        folder_path = os.path.join(label_1_path, folder)
+        if not os.path.isdir(folder_path):
+            continue
+
+        # Read the data file
+        data_file = os.path.join(folder_path, "data.json")
+        if not os.path.exists(data_file):
+            continue
+
+        try:
+            with open(data_file, 'r', encoding='utf-8') as f:
+                data = json.load(f)
+
+            # Get the list of image files
+            image_files = [f for f in os.listdir(folder_path) if f.endswith('.jpg') or f.endswith('.webp')]
+
+            # Create the sample
+            sample = {
+                'id': folder,
+                'title': data.get('title', ''),
+                'description': data.get('description', ''),
+                'date': data.get('date', ''),
+                'comments': data.get('comments', []),
+                'images': image_files,
+                'label': 1
+            }
+
+            # Assign to the appropriate split based on the folder name
+            if folder.startswith('train'):
+                train_data.append(sample)
+            elif folder.startswith('val'):
+                val_data.append(sample)
+            elif folder.startswith('test'):
+                test_data.append(sample)
+        except Exception as e:
+            print(f"Error while processing {folder}: {str(e)}")
+
+    # Save the data in CSV and JSON formats
+    print("Saving data...")
+
+    # Create DataFrames
+    train_df = pd.DataFrame(train_data)
+    val_df = pd.DataFrame(val_data)
+    test_df = pd.DataFrame(test_data)
+
+    # Save as CSV
+    train_df.to_csv(os.path.join(output_dir, "train.csv"), index=False)
+    val_df.to_csv(os.path.join(output_dir, "validation.csv"), index=False)
+    test_df.to_csv(os.path.join(output_dir, "test.csv"), index=False)
+
+    # Save as JSON
+    with open(os.path.join(output_dir, "train.json"), 'w', encoding='utf-8') as f:
+        json.dump(train_data, f, ensure_ascii=False, indent=2)
+
+    with open(os.path.join(output_dir, "validation.json"), 'w', encoding='utf-8') as f:
+        json.dump(val_data, f, ensure_ascii=False, indent=2)
+
+    with open(os.path.join(output_dir, "test.json"), 'w', encoding='utf-8') as f:
+        json.dump(test_data, f, ensure_ascii=False, indent=2)
+
+    # Create the dataset info file
+    with open(os.path.join(output_dir, "dataset_info.json"), 'w', encoding='utf-8') as f:
+        info = {
+            "name": "CHASM-Covert_Advertisement_on_RedNote",
+            "version": "1.0.0",
+            "description": "A dataset containing posts from Xiaohongshu (RedNote) for text classification tasks, specifically focused on identifying covert advertisements",
+            "splits": {
+                "train": {
+                    "num_examples": len(train_data)
+                },
+                "validation": {
+                    "num_examples": len(val_data)
+                },
+                "test": {
+                    "num_examples": len(test_data)
+                }
+            },
+            "features": {
+                "id": {"dtype": "string", "description": "Unique ID for the sample"},
+                "title": {"dtype": "string", "description": "Post title"},
+                "description": {"dtype": "string", "description": "Post description"},
+                "date": {"dtype": "string", "description": "Publication date and location"},
+                "comments": {"sequence": {"dtype": "string"}, "description": "List of comments"},
+                "images": {"sequence": {"dtype": "string"}, "description": "List of image filenames"},
+                "label": {"dtype": "int32", "description": "Classification label (0 or 1)"}
+            }
+        }
+        json.dump(info, f, ensure_ascii=False, indent=2)
+
+    print(f"Conversion complete! Data saved to {output_dir}")
+    print(f"Train split: {len(train_data)} samples")
+    print(f"Validation split: {len(val_data)} samples")
+    print(f"Test split: {len(test_data)} samples")
+
+if __name__ == "__main__":
+    convert_to_hf_format()