[email protected]
committed on
Merge branch 'main' of https://huggingface.co/datasets/hfmlsoc/hub_weekly_snapshots
- README.md +58 -1
- hub_download.py +118 -54
README.md
CHANGED
@@ -1,4 +1,61 @@
---
license: odbl
---
Weekly snapshots of Models, Datasets and Papers on the HF Hub

## Sample code

To query the dataset to see which snapshots are available, use e.g.:
```python
import json

from datasets import load_dataset
from huggingface_hub import HfApi

REPO_ID = "hfmlsoc/hub_weekly_snapshots"

hf_api = HfApi()
all_files = hf_api.list_repo_files(repo_id=REPO_ID, repo_type="dataset")

repo_type_to_snapshots = {}
for repo_fpath in all_files:
    if ".parquet" in repo_fpath:
        repo_type = repo_fpath.split("/")[0]
        repo_type_to_snapshots[repo_type] = repo_type_to_snapshots.get(repo_type, []) + [repo_fpath]

for repo_type in repo_type_to_snapshots:
    repo_type_to_snapshots[repo_type] = sorted(repo_type_to_snapshots[repo_type], key=lambda x: x.split("/")[1])

repo_type_to_snapshots
```

You can then load a specific snapshot, e.g.:
```python
date = "2025-01-01"
snapshot = load_dataset(REPO_ID, data_files={date.replace("-", ""): f"datasets/{date}/datasets.parquet"})
snapshot
```

Returning:
```
DatasetDict({
    20250101: Dataset({
        features: ['_id', 'id', 'author', 'cardData', 'disabled', 'gated', 'lastModified', 'likes', 'trendingScore', 'private', 'sha', 'description', 'downloads', 'tags', 'createdAt', 'key', 'paperswithcode_id', 'citation'],
        num_rows: 276421
    })
})
```
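
Snapshots of the other repo types follow the same `<repo_type>/<date>/<repo_type>.parquet` layout, so, assuming the listing above found at least one models snapshot, the most recent one could be loaded the same way:
```python
# Pick the latest models snapshot path returned by the listing code above
latest_models_file = repo_type_to_snapshots["models"][-1]
models_date = latest_models_file.split("/")[1]
models_snapshot = load_dataset(REPO_ID, data_files={models_date.replace("-", ""): latest_models_file})
```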

### Sample analysis of top datasets

To look at the 10 most liked datasets as of January 1st 2025, you can then run:
```python
[{
    "id": row['id'],
    "tags": json.loads(row["cardData"]).get("tags", []),
    "tasks": json.loads(row["cardData"]).get("task_categories", []),
    "likes": row['likes'],
} for row in snapshot["20250101"].sort("likes", reverse=True).select(range(10))]
```
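
As a rough sketch, you could similarly count the most common task categories across the whole snapshot (rows with no cardData are skipped here):
```python
from collections import Counter

task_counts = Counter()
for row in snapshot["20250101"]:
    # cardData may be empty or missing for some repositories
    card = json.loads(row["cardData"]) if row["cardData"] else {}
    task_counts.update(card.get("task_categories") or [])

task_counts.most_common(10)
```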

Most of the user-maintained metadata for Hub repositories is stored in the cardData field, which is saved as a JSON-formatted string.
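
For example, the decoded card of a single dataset row can be inspected directly (which keys are present varies by repository):
```python
# Fall back to an empty card when the field is missing or empty
card = json.loads(snapshot["20250101"][0]["cardData"] or "{}")
sorted(card.keys())
```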
hub_download.py
CHANGED
@@ -2,59 +2,123 @@ import os

```python
import os
import subprocess
import datetime
from pathlib import Path
import shutil

# --- CONFIGURATION ---

items = ['spaces', 'models', 'datasets', 'daily_papers']
REPO_URL = "https://huggingface.co/datasets/cfahlgren1/hub-stats"

for item in items:
    FILE_PATH = item + ".parquet"
    DEST_DIR = item
    CLONE_DIR = "temp_repo"
    # ----------------------

    # First, make sure git lfs is installed on your system
    try:
        subprocess.run(["git", "lfs", "install"], check=True)
        print("Git LFS installed successfully")
    except Exception as e:
        print(f"Warning: Git LFS installation might have failed: {e}")
        print("Continuing anyway...")

    # Clone repo if not already cloned
    if not os.path.exists(CLONE_DIR):
        print(f"Cloning repository with Git LFS: {REPO_URL}")
        subprocess.run(["git", "lfs", "clone", REPO_URL, CLONE_DIR], check=True)
    else:
        print(f"Repository already exists at {CLONE_DIR}")
        # Force checkout to main branch first before pulling
        subprocess.run(["git", "-C", CLONE_DIR, "checkout", "main"], check=True)
        subprocess.run(["git", "-C", CLONE_DIR, "pull"], check=True)

    # Get all commits that affected the file
    print(f"Finding commits for file: {FILE_PATH}")
    git_log_cmd = ["git", "-C", CLONE_DIR, "log", "--pretty=format:%H|%ct", "--follow", FILE_PATH]
    result = subprocess.run(git_log_cmd, capture_output=True, text=True, check=True)
    commits_info = result.stdout.strip().split('\n')

    if not commits_info or commits_info[0] == '':
        print("No commits found for the file.")
        exit()

    # Parse commit info
    commits = []
    for commit_line in commits_info:
        if '|' in commit_line:
            hexsha, timestamp = commit_line.split('|')
            commits.append((hexsha, int(timestamp)))

    # Sort commits by timestamp (oldest first)
    commits.sort(key=lambda x: x[1])

    # Get the date of the first commit
    start_date = datetime.datetime.fromtimestamp(commits[0][1]).date()
    end_date = datetime.date.today()

    Path(DEST_DIR).mkdir(parents=True, exist_ok=True)

    current_date = start_date
    week = datetime.timedelta(weeks=1)

    print(f"Processing weekly snapshots from {start_date} to {end_date}")

    while current_date < end_date:
        next_date = current_date + week
        next_date_timestamp = int(datetime.datetime.combine(next_date, datetime.time.min).timestamp())

        # Find latest commit before next_date
        weekly_commit = None
        for hexsha, timestamp in reversed(commits):
            if timestamp < next_date_timestamp:
                weekly_commit = hexsha
                commit_date = datetime.datetime.fromtimestamp(timestamp).date()
                break

        if weekly_commit:
            try:
                print(f"Processing week of {current_date} using commit {weekly_commit[:8]}")

                # Create the destination folder
                weekly_folder = Path(DEST_DIR) / str(current_date)
                weekly_folder.mkdir(parents=True, exist_ok=True)
                output_path = weekly_folder / os.path.basename(FILE_PATH)

                # Checkout this specific commit
                subprocess.run(["git", "-C", CLONE_DIR, "checkout", weekly_commit], check=True)

                # Force Git LFS to fetch the actual file content
                subprocess.run(["git", "-C", CLONE_DIR, "lfs", "pull"], check=True)

                # Copy the actual file
                source_file = os.path.join(CLONE_DIR, FILE_PATH)

                if os.path.exists(source_file):
                    shutil.copy2(source_file, output_path)
                    file_size = os.path.getsize(output_path) / (1024 * 1024)  # Size in MB
                    print(f"Saved: {output_path} ({file_size:.2f} MB)")

                    # Verify if it's actually a parquet file and not just a pointer
                    with open(output_path, 'rb') as f:
                        header = f.read(4)
                        if header != b'PAR1':
                            print(f"WARNING: File doesn't start with Parquet magic bytes. Size: {file_size:.2f} MB")
                            if file_size < 1:  # Less than 1MB is suspicious for a Parquet file
                                print("This is likely still an LFS pointer and not the actual file")
                else:
                    print(f"ERROR: Source file not found at {source_file}")

            except Exception as e:
                print(f"Error processing commit {weekly_commit}: {e}")
        else:
            print(f"No commit found for week of {current_date}")

        current_date = next_date

    # Return to main branch before exiting
    try:
        subprocess.run(["git", "-C", CLONE_DIR, "checkout", "main"], check=True)
        print("Returned to main branch")
    except Exception as e:
        print(f"Warning: Couldn't return to main branch: {e}")
```
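
After a run, each repo type should have one dated subfolder per week (the layout implied by `DEST_DIR` and `weekly_folder` above), which can be listed with e.g.:
```python
from pathlib import Path

for repo_type in ['spaces', 'models', 'datasets', 'daily_papers']:
    # Each weekly snapshot is expected at <repo_type>/<YYYY-MM-DD>/<repo_type>.parquet
    for weekly_file in sorted(Path(repo_type).glob(f"*/{repo_type}.parquet")):
        print(weekly_file)
```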