pere committed on
Commit 178dcfa
1 Parent(s): 6b0add3

Upload src/upload_culturax.py with huggingface_hub

Files changed (1)
  1. src/upload_culturax.py +95 -61
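The commit message refers to pushing a single file with huggingface_hub. For reference, a minimal sketch of that call, assuming the uploader is already authenticated (for example via huggingface-cli login) and using a placeholder repo id:

from huggingface_hub import HfApi

api = HfApi()
# Push one local file into a dataset repo; path_in_repo decides where it lands.
api.upload_file(
    path_or_fileobj="src/upload_culturax.py",
    path_in_repo="src/upload_culturax.py",
    repo_id="ScandLM/danish_culturax",  # placeholder repo, for illustration only
    repo_type="dataset",
)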
src/upload_culturax.py CHANGED
@@ -5,16 +5,17 @@ from datetime import datetime
 from datasets import Dataset
 from huggingface_hub import HfApi, upload_file
 import shutil
+import math
 
 def clean_jsonl_data(file_path):
-    """Clean the JSONL file data and ensure proper formatting."""
+    """Clean and validate JSONL file data."""
     cleaned_data = []
     with open(file_path, "r", encoding="utf-8") as f:
         for line_number, line in enumerate(f, start=1):
             try:
                 data = json.loads(line)
 
-                # Check and clean 'timestamp' field
+                # Validate 'timestamp' field
                 if "timestamp" in data:
                     if not data["timestamp"] or not isinstance(data["timestamp"], str):
                         data["timestamp"] = None
@@ -27,11 +28,11 @@ def clean_jsonl_data(file_path):
                     except ValueError:
                         data["timestamp"] = None
 
-                # Ensure 'text' field is a string
+                # Ensure 'text' is a string
                 if "text" in data and not isinstance(data["text"], str):
                     data["text"] = str(data["text"]) if data["text"] is not None else None
 
-                # Check for other fields, e.g., 'url', 'source'
+                # Validate 'url' and 'source'
                 if "url" in data and not isinstance(data["url"], str):
                     data["url"] = str(data["url"]) if data["url"] is not None else None
 
@@ -41,39 +42,83 @@ def clean_jsonl_data(file_path):
                 cleaned_data.append(data)
 
             except json.JSONDecodeError as e:
-                print(f"JSON decoding error at line {line_number}: {e}")
+                print(f"JSON decode error at line {line_number}: {e}")
             except Exception as e:
                 print(f"Error processing line {line_number}: {e}")
 
     return cleaned_data
 
 def estimate_num_shards(file_path, target_shard_size_gb=1):
-    """Estimate the number of shards based on the JSONL file size."""
-    file_size_gb = os.path.getsize(file_path) / (1024 ** 3) # Convert bytes to GB
-    num_shards = max(1, int(file_size_gb / target_shard_size_gb) + 1) # Ensure at least one shard
+    """Estimate the number of shards needed based on file size."""
+    file_size_gb = os.path.getsize(file_path) / (1024 ** 3) # Bytes to GB
+    num_shards = max(1, math.ceil(file_size_gb / target_shard_size_gb))
     return num_shards
 
+def split_jsonl_file(input_file, output_prefix, max_size_gb=45):
+    """Split large JSONL files into smaller shards."""
+    file_size_gb = os.path.getsize(input_file) / (1024 ** 3) # Convert bytes to GB
+    if file_size_gb <= max_size_gb:
+        return [input_file] # No need to split if below limit
+
+    # Calculate lines per shard
+    with open(input_file, "r", encoding="utf-8") as f:
+        lines = f.readlines()
+    num_lines = len(lines)
+
+    num_shards = math.ceil(file_size_gb / max_size_gb)
+    lines_per_shard = math.ceil(num_lines / num_shards)
+
+    shard_files = []
+    for i in range(num_shards):
+        shard_file = f"{output_prefix}_part{i+1}.jsonl"
+        with open(shard_file, "w", encoding="utf-8") as f:
+            f.writelines(lines[i * lines_per_shard:(i + 1) * lines_per_shard])
+        shard_files.append(shard_file)
+
+    return shard_files
+
+def upload_large_file(file_path, repo_id, path_in_repo, repo_type="dataset"):
+    """Upload large files with multi-part upload handling."""
+    file_size_mb = os.path.getsize(file_path) / (1024 ** 2) # Convert bytes to MB
+    # Use multi-part upload for files > 5MB
+    if file_size_mb > 5:
+        upload_file(
+            path_or_fileobj=file_path,
+            path_in_repo=path_in_repo,
+            repo_id=repo_id,
+            repo_type=repo_type,
+            use_auth_token=True,
+        )
+        print(f"Uploaded '{path_in_repo}' with multi-part upload.")
+    else:
+        # Direct upload for smaller files
+        with open(file_path, 'rb') as f:
+            api = HfApi()
+            api.upload_file(
                path_or_fileobj=f,
+                path_in_repo=path_in_repo,
+                repo_id=repo_id,
+                repo_type=repo_type,
+                use_auth_token=True,
+            )
+        print(f"Uploaded '{path_in_repo}' with direct upload.")
+
 def create_and_upload_dataset(language):
     # Define constants
     org_name = "ScandLM"
     dataset_name = f"{language}_culturax"
     repo_id = f"{org_name}/{dataset_name}"
     jsonl_file = f"{language}_culturax.jsonl"
-    parquet_file_prefix = f"{language}_culturax"
-    jsonl_folder, data_folder, src_folder = "jsonl", "data", "src"
-
-    # Map the language argument to two-letter language code
-    language_codes = {
-        "danish": "da",
-        "swedish": "sv",
-        "norwegian": "no",
-        "nynorsk": "nn",
-    }
-
-    # Get the correct two-letter language code
+    temp_folder = f"temp_{language}"
+    jsonl_folder = os.path.join(temp_folder, "jsonl")
+    data_folder = os.path.join(temp_folder, "data")
+    src_folder = os.path.join(temp_folder, "src")
+
+    # Language codes
+    language_codes = {"danish": "da", "swedish": "sv", "norwegian": "no", "nynorsk": "nn"}
     language_code = language_codes.get(language, "unknown")
 
-    # Simplified YAML tags for dataset card
+    # YAML front matter
     yaml_tags = (
         f"---\n"
         f"language: [{language_code}]\n"
@@ -88,14 +133,12 @@ def create_and_upload_dataset(language):
         f"```\n"
     )
 
-    # Check if JSONL file exists
+    # Verify JSONL file
     if not os.path.exists(jsonl_file):
         raise FileNotFoundError(f"The file '{jsonl_file}' was not found.")
 
-    # Clean data before creating the dataset
+    # Clean data and create a temporary JSONL file
    cleaned_data = clean_jsonl_data(jsonl_file)
-
-    # Write cleaned data to a temporary JSONL file
     os.makedirs(jsonl_folder, exist_ok=True)
     cleaned_jsonl_file = os.path.join(jsonl_folder, f"cleaned_{jsonl_file}")
     with open(cleaned_jsonl_file, "w", encoding="utf-8") as f:
@@ -103,16 +146,17 @@
             json.dump(entry, f)
             f.write("\n")
 
-    # Load the cleaned JSONL file into a Hugging Face dataset
+    # Split JSONL if too large
+    jsonl_shards = split_jsonl_file(cleaned_jsonl_file, os.path.join(jsonl_folder, language), max_size_gb=45)
+
+    # Load data into Dataset
     dataset = Dataset.from_json(cleaned_jsonl_file)
 
-    # Estimate Parquet file size and determine number of shards
+    # Estimate and create Parquet shards
     num_shards = estimate_num_shards(cleaned_jsonl_file, target_shard_size_gb=1)
-    print(f"Number of shards: {num_shards}")
+    print(f"Number of Parquet shards: {num_shards}")
 
-    # Create Parquet file
     os.makedirs(data_folder, exist_ok=True)
-    # Define parquet filename with a typical training file convention
     parquet_files = []
     for shard_id in range(num_shards):
         shard = dataset.shard(num_shards=num_shards, index=shard_id)
@@ -121,74 +165,64 @@
         parquet_files.append(parquet_file)
         print(f"Parquet file created: {parquet_file}")
 
-    # Authenticate using the Hugging Face API token
+    # Authenticate with Hugging Face
     api = HfApi()
 
-    # Create a new dataset in the ScandLM organization
+    # Create dataset repo
     api.create_repo(repo_id=repo_id, repo_type="dataset", private=False, exist_ok=True)
     print(f"Dataset repository '{repo_id}' created successfully.")
 
     # Upload Parquet files
     for parquet_file in parquet_files:
-        upload_file(
-            path_or_fileobj=parquet_file,
-            path_in_repo=f"{data_folder}/{os.path.basename(parquet_file)}",
+        upload_large_file(
+            file_path=parquet_file,
             repo_id=repo_id,
-            repo_type="dataset",
+            path_in_repo=f"data/{os.path.basename(parquet_file)}",
         )
-        print(f"Parquet file '{parquet_file}' uploaded successfully.")
 
-    # Upload JSONL file to the jsonl folder
-    upload_file(
-        path_or_fileobj=cleaned_jsonl_file,
-        path_in_repo=f"{jsonl_folder}/{os.path.basename(cleaned_jsonl_file)}",
-        repo_id=repo_id,
-        repo_type="dataset",
-    )
-    print(f"JSONL file '{cleaned_jsonl_file}' uploaded successfully.")
+    # Upload JSONL shards
+    for shard_file in jsonl_shards:
+        upload_large_file(
+            file_path=shard_file,
+            repo_id=repo_id,
+            path_in_repo=f"jsonl/{os.path.basename(shard_file)}",
+        )
 
-    # Create README.md file locally with YAML tags
-    readme_path = "README.md"
+    # Upload README
+    readme_path = os.path.join(temp_folder, "README.md")
     with open(readme_path, "w", encoding="utf-8") as f:
         f.write(yaml_tags)
 
-    # Upload README.md to the repository
     upload_file(
         path_or_fileobj=readme_path,
         path_in_repo="README.md",
         repo_id=repo_id,
         repo_type="dataset",
+        use_auth_token=True
     )
     print("README.md uploaded successfully.")
 
-    # Upload Python scripts
+    # Upload scripts
     os.makedirs(src_folder, exist_ok=True)
     for script in ["download_culturax.py", "upload_culturax.py"]:
         if os.path.exists(script):
-            upload_file(
-                path_or_fileobj=script,
-                path_in_repo=f"{src_folder}/{script}",
+            upload_large_file(
+                file_path=script,
                 repo_id=repo_id,
-                repo_type="dataset",
+                path_in_repo=f"src/{script}",
             )
-            print(f"Script '{script}' uploaded successfully.")
 
     # Clean up temporary files
     if os.path.exists(readme_path):
         os.remove(readme_path)
-    if os.path.exists(cleaned_jsonl_file):
-        os.remove(cleaned_jsonl_file)
 
-    # Remove created directories and their contents
-    shutil.rmtree(jsonl_folder, ignore_errors=True)
-    shutil.rmtree(data_folder, ignore_errors=True)
-    shutil.rmtree(src_folder, ignore_errors=True)
+    # Remove directories
+    shutil.rmtree(temp_folder, ignore_errors=True)
 
     print("Dataset setup complete!")
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Upload a cultural dataset to Hugging Face.")
     parser.add_argument("language", type=str, help="The language for the dataset (e.g., danish, swedish, norwegian, nynorsk).")
-
     args = parser.parse_args()
     create_and_upload_dataset(args.language)
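A brief usage sketch for the revised script, under the assumption that a danish_culturax.jsonl file sits in the working directory and a Hugging Face token is already configured. The shard arithmetic mirrors estimate_num_shards above:

import math

# Same calculation as estimate_num_shards: a 3.2 GB JSONL file with 1 GB target shards
file_size_gb = 3.2
target_shard_size_gb = 1
num_shards = max(1, math.ceil(file_size_gb / target_shard_size_gb))
print(num_shards)  # 4

# The script itself is driven from the command line, for example:
#   python src/upload_culturax.py danish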