qingy2024 committed on
Commit
77f5a3f
·
verified ·
1 Parent(s): d5bea5e

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ vatex_training_v1.0.json filter=lfs diff=lfs merge=lfs -text
.ipynb_checkpoints/vatex_download-checkpoint.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import subprocess
3
+ import argparse
4
+ import logging
5
+ import sys
6
+ import os
7
+ import multiprocessing
8
+ import glob # Import the glob module
9
+ from tqdm import tqdm
10
+
11
+ # --- Setup Logging ---
12
+ logging.basicConfig(
13
+ level=logging.INFO,
14
+ format='%(asctime)s - %(levelname)s - %(message)s',
15
+ stream=sys.stdout
16
+ )
17
+
18
+ def download_worker(task_args):
19
+ """
20
+ The worker function for a single download task.
21
+ This function is executed by each process in the multiprocessing pool.
22
+
23
+ Args:
24
+ task_args (tuple): A tuple containing (item, output_dir).
25
+ - item (dict): The video dictionary from the JSON.
26
+ - output_dir (str): The directory to save the video.
27
+
28
+ Returns:
29
+ tuple: A tuple containing (bool, str) for success/failure and the video_info_string.
30
+ e.g., (True, 'video_id_...') or (False, 'video_id_...')
31
+ """
32
+ item, output_dir = task_args
33
+ video_info_string = None # Initialize in case of early failure
34
+
35
+ try:
36
+ # Get the full video info string (e.g., 'bjtnAh_wz1c_000002_000012')
37
+ video_info_string = item['videoID']
38
+
39
+ # This logic is now robust against underscores in the video ID
40
+ parts = video_info_string.split('_')
41
+ if len(parts) < 3:
42
+ raise ValueError("videoID string does not have enough parts.")
43
+
44
+ end_time = parts[-1] # The last part is the end time
45
+ start_time = parts[-2] # The second-to-last part is the start time
46
+ video_id = "_".join(parts[:-2]) # Join everything else to form the video_id
47
+
48
+ start_seconds = int(start_time)
49
+ end_seconds = int(end_time)
50
+
51
+ except (KeyError, ValueError) as e:
52
+ logging.warning(f"Skipping entry due to malformed data: {item}. Error: {e}")
53
+ # Return failure with a placeholder if video_info_string could not be parsed
54
+ return False, (video_info_string or f"malformed_data:_{item}")
55
+
56
+ # Define the full output path for the video file
57
+ output_path_template = os.path.join(output_dir, f"{video_info_string}.%(ext)s")
58
+ youtube_url = f"https://www.youtube.com/watch?v={video_id}"
59
+
60
+ command = [
61
+ 'yt-dlp',
62
+ '--quiet', '--no-warnings',
63
+ '-o', output_path_template, # Use -o as a shorthand for --output
64
+ '--download-sections', f"*{start_seconds}-{end_seconds}",
65
+ '--force-keyframes-at-cuts',
66
+ '--remux-video', 'mp4',
67
+ youtube_url
68
+ ]
69
+
70
+ try:
71
+ # Using capture_output=True and text=True to get stdout/stderr if needed
72
+ subprocess.run(command, check=True, capture_output=True, text=True)
73
+ return True, video_info_string
74
+ except FileNotFoundError:
75
+ logging.error("CRITICAL: 'yt-dlp' command not found. Please ensure it's installed and in your PATH.")
76
+ # This error is critical and will likely affect all workers, but we return failure for this task.
77
+ return False, video_info_string
78
+ except subprocess.CalledProcessError as e:
79
+ logging.error(f"Failed to download {video_info_string}. Reason: {e.stderr.strip()}")
80
+ return False, video_info_string
81
+ except Exception as e:
82
+ logging.error(f"An unexpected error occurred for {video_info_string}: {e}")
83
+ return False, video_info_string
84
+
85
def process_downloads(json_file_path, output_dir, num_jobs):
    """
    Orchestrates the parallel downloading of video segments, skipping existing files.

    Args:
        json_file_path (str): The path to the input JSON file.
        output_dir (str): The directory to save downloaded videos.
        num_jobs (int): The number of parallel processes to use.

    Exits the process (status 1) when the JSON file is missing or invalid.
    """
    try:
        with open(json_file_path, 'r', encoding='utf-8') as f:
            video_data = json.load(f)
        logging.info(f"Loaded {len(video_data)} video entries from '{json_file_path}'.")
    except FileNotFoundError:
        logging.error(f"Error: The file '{json_file_path}' was not found.")
        sys.exit(1)
    except json.JSONDecodeError:
        logging.error(f"Error: Failed to decode JSON from '{json_file_path}'. Check format.")
        sys.exit(1)

    os.makedirs(output_dir, exist_ok=True)
    logging.info(f"Output directory set to '{output_dir}'.")

    # Pre-filter tasks so already-downloaded clips are skipped (resumability).
    tasks_to_run = []
    skipped_count = 0
    logging.info("Checking for existing files to skip...")

    for item in tqdm(video_data, desc="Scanning for existing files"):
        try:
            video_info_string = item['videoID']
            # Match the clip's filename with any extension.
            file_pattern = os.path.join(output_dir, f"{video_info_string}.*")
            if glob.glob(file_pattern):
                skipped_count += 1
            else:
                tasks_to_run.append((item, output_dir))
        except KeyError:
            # Malformed entries are passed through; the worker logs and skips them.
            tasks_to_run.append((item, output_dir))

    if not tasks_to_run:
        logging.info("All video files already exist. Nothing to download.")
        logging.info(f"Total files skipped: {skipped_count}")
        return

    logging.info(f"Found {skipped_count} existing files. Queuing {len(tasks_to_run)} new downloads.")
    logging.info(f"Starting downloads with {num_jobs} parallel jobs...")

    success_count = 0
    failure_count = 0

    with multiprocessing.Pool(processes=num_jobs) as pool:
        # FIX: iterate the imap_unordered iterator directly with tqdm(total=...)
        # instead of calling next() inside a range() loop — the manual pairing
        # could raise a stray StopIteration if counter and iterator ever
        # disagreed, and it left the returned video id unused.
        for success, _video_info in tqdm(
            pool.imap_unordered(download_worker, tasks_to_run),
            total=len(tasks_to_run),
            desc="Downloading Videos",
        ):
            if success:
                success_count += 1
            else:
                failure_count += 1

    logging.info("--- Download Summary ---")
    logging.info(f"Successfully downloaded: {success_count}")
    logging.info(f"Skipped (already exist): {skipped_count}")
    logging.info(f"Failed to download: {failure_count}")
    logging.info("------------------------")
154
+
155
def main():
    """Parse command-line arguments, validate them, and start the downloads."""
    parser = argparse.ArgumentParser(
        description="Download video segments in parallel from a JSON file using yt-dlp. Skips existing files.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "json_file", type=str,
        help="Path to the JSON file containing video information.",
    )
    parser.add_argument(
        "-o", "--output-dir", type=str, default="video_downloads",
        help="Directory to save downloaded videos. (Default: 'video_downloads')",
    )
    parser.add_argument(
        "-j", "--jobs", type=int, default=4,
        help="Number of parallel download jobs to run. (Default: 4)",
    )
    args = parser.parse_args()

    # Reject zero or negative job counts before spawning a pool.
    if args.jobs < 1:
        logging.error("Number of jobs must be a positive integer.")
        sys.exit(1)

    process_downloads(args.json_file, args.output_dir, args.jobs)


if __name__ == "__main__":
    main()
.ipynb_checkpoints/vatex_monitor_downloads-checkpoint.py ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import curses
2
+ import json
3
+ import time
4
+ import sys
5
+ import os
6
+ import glob
7
+ import argparse
8
+ from datetime import datetime
9
+
10
# --- Configuration ---
BAR_CHAR = "#"  # Simpler character that works in all terminals
EMPTY_BAR_CHAR = "-"  # Fills the unfinished portion of the progress bar
REFRESH_INTERVAL_SECONDS = 1  # Redraw period; also the getch() timeout below
14
+
15
def format_time(seconds):
    """Render a duration in seconds as a short string: s, m, or h."""
    # Pick the first unit whose upper bound the duration falls under.
    for upper_bound, divisor, suffix in ((60, 1, "s"), (3600, 60, "m")):
        if seconds < upper_bound:
            return f"{seconds / divisor:.1f}{suffix}"
    return f"{seconds / 3600:.1f}h"
25
+
26
def analyze_progress(json_file_path, output_dir, last_stats=None):
    """
    Analyzes the JSON file and output directory to calculate progress.

    Args:
        json_file_path (str): JSON file listing the expected videos.
        output_dir (str): Directory where downloaded clips appear.
        last_stats (dict | None): Snapshot from the previous poll with keys
            'completed' and 'timestamp'; used to estimate download rate/ETA.

    Returns:
        tuple: (total_videos, completed_videos, pending_videos, recent_files,
        download_rate, eta, file_sizes). All zeros / empties when the JSON
        cannot be read.

    NOTE(review): a clip that is still being written also matches the glob,
    so 'completed' may briefly over-count in-flight downloads — confirm
    against the downloader's output naming.
    """
    start_time = time.time()

    # 1. Get the set of all expected video IDs from the JSON file
    try:
        with open(json_file_path, 'r', encoding='utf-8') as f:
            video_data = json.load(f)
        # Map videoID -> title so the recent-files list can show titles.
        expected_video_ids = {item['videoID']: item.get('title', 'Unknown')
                              for item in video_data if 'videoID' in item}
        total_videos = len(expected_video_ids)
    except (FileNotFoundError, json.JSONDecodeError):
        # Monitor keeps running; the caller shows a "waiting" message.
        return 0, 0, 0, [], 0, 0, {}

    # 2. Get the set of all completed video IDs from the output directory
    completed_video_ids = set()
    completed_files_with_info = []
    file_sizes = {}

    try:
        for filepath in glob.glob(os.path.join(output_dir, '*.*')):
            basename = os.path.basename(filepath)
            # Filename without extension is the videoID (downloader convention).
            video_id = os.path.splitext(basename)[0]

            if video_id in expected_video_ids:
                completed_video_ids.add(video_id)
                try:
                    mtime = os.path.getmtime(filepath)
                    size = os.path.getsize(filepath)
                    title = expected_video_ids.get(video_id, 'Unknown')
                    completed_files_with_info.append((basename, mtime, size, title, video_id))
                    file_sizes[video_id] = size
                except FileNotFoundError:
                    # File vanished between glob and stat (e.g. being remuxed).
                    continue
    except FileNotFoundError:
        pass

    # 3. Sort recent files by modification time (newest first)
    completed_files_with_info.sort(key=lambda x: x[1], reverse=True)
    recent_files = completed_files_with_info[:10]  # Show up to 10 recent files

    # 4. Calculate stats
    completed_videos = len(completed_video_ids)
    pending_videos = total_videos - completed_videos

    # 5. Calculate download rate and ETA from the delta vs the previous poll
    download_rate = 0
    eta = 0

    if last_stats and 'completed' in last_stats and last_stats['completed'] < completed_videos:
        time_diff = time.time() - last_stats.get('timestamp', start_time)
        if time_diff > 0:
            download_rate = (completed_videos - last_stats['completed']) / time_diff * 60  # videos per minute
            if download_rate > 0 and pending_videos > 0:
                eta = pending_videos / download_rate * 60  # seconds remaining

    return total_videos, completed_videos, pending_videos, recent_files, download_rate, eta, file_sizes
85
+
86
def draw_progress_bar(stdscr, y, x, width, label, percentage, color_pair):
    """Draw `label`, a filled/empty bar, then ' [xx.x%]' on row y.

    The label is drawn with color pair 3 (cyan, per main's palette), the bar
    with the supplied `color_pair`. Silently does nothing when the row is
    off-screen or `width` leaves no room for the bar.
    """
    if width <= 0 or y >= stdscr.getmaxyx()[0]:
        return

    # Reserve space for label, percentage, and brackets
    label_width = len(label)
    percent_str = f" [{percentage:5.1f}%]"
    bar_area_width = width - label_width - len(percent_str)

    if bar_area_width <= 0:
        return

    filled_len = int(bar_area_width * percentage / 100)
    bar = BAR_CHAR * filled_len + EMPTY_BAR_CHAR * (bar_area_width - filled_len)

    try:
        stdscr.addstr(y, x, label, curses.color_pair(3))
        stdscr.addstr(y, x + label_width, bar, color_pair)
        stdscr.addstr(y, x + label_width + bar_area_width, percent_str)
    except curses.error:
        # curses raises when writing touches the bottom-right cell; ignore.
        pass
109
+
110
def format_size(size_bytes):
    """Render a byte count with B / KB / MB / GB units."""
    kib = 1024
    mib = kib * 1024
    gib = mib * 1024
    if size_bytes < kib:
        return f"{size_bytes} B"
    if size_bytes < mib:
        return f"{size_bytes/1024:.1f} KB"
    if size_bytes < gib:
        return f"{size_bytes/(1024*1024):.1f} MB"
    # Two decimals for GB, where a tenth is already a lot of data.
    return f"{size_bytes/(1024*1024*1024):.2f} GB"
120
+
121
def safe_addstr(stdscr, y, x, text, attr=0):
    """Write `text` at (y, x), clipping to the screen and never raising."""
    height, width = stdscr.getmaxyx()

    # Bail out when the anchor cell itself is off-screen.
    if not (0 <= y < height and 0 <= x < width):
        return

    room = width - x
    if room <= 0:
        return

    try:
        # Truncate so the string never runs past the right edge.
        stdscr.addstr(y, x, text[:room], attr)
    except curses.error:
        # Swallow any residual drawing error (e.g. bottom-right cell).
        pass
139
+
140
def main(stdscr, json_path, output_dir):
    """Curses event loop: poll progress and redraw the dashboard.

    Redraws roughly every REFRESH_INTERVAL_SECONDS (via the getch timeout).
    'q' exits, 'r' forces an immediate redraw.

    NOTE(review): the scraped source lost indentation; the progress-bar and
    runtime sections are placed at loop level (outside the `else`) because the
    percentage expression guards `total > 0` itself — confirm against the
    original file.
    """
    # --- Curses Setup ---
    curses.curs_set(0)  # Hide cursor
    curses.use_default_colors()
    curses.init_pair(1, curses.COLOR_GREEN, -1)   # Success / Bar
    curses.init_pair(2, curses.COLOR_WHITE, -1)   # Normal text
    curses.init_pair(3, curses.COLOR_CYAN, -1)    # Labels
    curses.init_pair(4, curses.COLOR_RED, -1)     # Error / Warning
    curses.init_pair(5, curses.COLOR_YELLOW, -1)  # Info

    # For tracking download rate
    last_stats = None
    start_time = time.time()

    while True:
        stdscr.erase()
        height, width = stdscr.getmaxyx()

        # --- Data Loading ---
        total, completed, pending, recent_files, download_rate, eta, file_sizes = analyze_progress(
            json_path, output_dir, last_stats
        )

        # Update stats for next iteration (feeds the rate/ETA estimate)
        last_stats = {
            'completed': completed,
            'timestamp': time.time()
        }

        # Calculate total downloaded size
        total_size = sum(file_sizes.values())

        # --- Drawing UI ---
        # Header
        header = f"Download Monitor - {os.path.basename(json_path)}"
        safe_addstr(stdscr, 0, 2, header, curses.A_BOLD)
        safe_addstr(stdscr, 1, 2, f"Output: {os.path.basename(output_dir)}", curses.A_DIM)
        safe_addstr(stdscr, 2, 2, "Press 'q' to quit, 'r' to refresh", curses.A_DIM)

        try:
            stdscr.hline(3, 0, "-", width)
        except curses.error:
            pass

        y_pos = 5

        # Stats Section
        if total == 0:
            safe_addstr(stdscr, y_pos, 4, "Waiting for data or JSON not found...", curses.color_pair(4) | curses.A_BOLD)
            y_pos += 2
        else:
            # Stats header
            safe_addstr(stdscr, y_pos, 4, "Download Statistics", curses.color_pair(3) | curses.A_BOLD)
            y_pos += 2

            # Left column stats
            safe_addstr(stdscr, y_pos, 4, f"Total Videos: {total}", curses.color_pair(2))
            safe_addstr(stdscr, y_pos + 1, 4, f"Completed: {completed}", curses.color_pair(1))
            safe_addstr(stdscr, y_pos + 2, 4, f"Pending: {pending}", curses.color_pair(5))

            # Right column stats if there's enough space
            if width > 50:
                right_col = width // 2
                safe_addstr(stdscr, y_pos, right_col, f"Total Size: {format_size(total_size)}", curses.color_pair(2))
                safe_addstr(stdscr, y_pos + 1, right_col, f"Rate: {download_rate:.1f} videos/min", curses.color_pair(1))

                if eta > 0:
                    eta_str = format_time(eta)
                    safe_addstr(stdscr, y_pos + 2, right_col, f"ETA: {eta_str}", curses.color_pair(5))
                else:
                    safe_addstr(stdscr, y_pos + 2, right_col, "ETA: calculating...", curses.color_pair(5))

            y_pos += 4

        # Progress Bar
        percentage = (completed / total) * 100 if total > 0 else 0
        draw_progress_bar(stdscr, y_pos, 4, width - 8, "Progress: ", percentage, curses.color_pair(1))
        y_pos += 2

        # Runtime (of the monitor itself, not the downloader)
        runtime = time.time() - start_time
        runtime_str = format_time(runtime)
        safe_addstr(stdscr, y_pos, 4, f"Runtime: {runtime_str}", curses.color_pair(2))
        y_pos += 2

        # Separator
        try:
            stdscr.hline(y_pos, 0, "-", width)
        except curses.error:
            pass

        y_pos += 1

        # Recent Files Section
        safe_addstr(stdscr, y_pos, 2, "Recently Completed Files:", curses.color_pair(3) | curses.A_BOLD)
        y_pos += 2

        if recent_files:
            for i, (filename, mtime, size, title, video_id) in enumerate(recent_files):
                if y_pos >= height - 1:
                    break

                # Format the time
                time_str = datetime.fromtimestamp(mtime).strftime("%H:%M:%S")
                size_str = format_size(size)

                # Display title if available, otherwise filename
                display_name = title if title != 'Unknown' else filename

                # Truncate long names
                max_name_len = width - 25 if width > 25 else 10
                if len(display_name) > max_name_len:
                    display_name = display_name[:max_name_len-3] + "..."

                file_info = f"{display_name} ({size_str}, {time_str})"
                safe_addstr(stdscr, y_pos, 4, file_info, curses.color_pair(2))
                y_pos += 1
        else:
            safe_addstr(stdscr, y_pos, 4, "(No completed files found yet)", curses.color_pair(4))

        stdscr.refresh()

        # Non-blocking input with a timeout (doubles as the refresh interval)
        stdscr.timeout(REFRESH_INTERVAL_SECONDS * 1000)
        key = stdscr.getch()
        if key == ord('q'):
            break
        elif key == ord('r'):
            # Force refresh
            continue
270
+
271
if __name__ == "__main__":
    # CLI entry point: watch the same JSON + output directory the downloader uses.
    arg_parser = argparse.ArgumentParser(
        description="A curses-based monitor for the video downloader script.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    arg_parser.add_argument(
        "json_file", type=str,
        help="Path to the JSON file containing video information (the same one used by the downloader).",
    )
    arg_parser.add_argument(
        "output_dir", type=str,
        help="Path to the output directory where videos are being saved.",
    )
    cli = arg_parser.parse_args()

    print("Starting download monitor...")
    print(f" - Watching JSON: {cli.json_file}")
    print(f" - Watching Dir: {cli.output_dir}")
    print("\nPress 'q' in the monitor window to exit.")
    time.sleep(1)

    try:
        # wrapper() handles terminal init/teardown even on exceptions.
        curses.wrapper(main, cli.json_file, cli.output_dir)
    except curses.error as e:
        print(f"\nCurses error: {e}")
        print("Your terminal window might be too small to run the monitor.")
    except KeyboardInterrupt:
        print("\nMonitor stopped.")
vatex-dataset.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83b91fc11c98614c4f75c24b5899210c4646966bfc9d74da46cc2a6197d10382
3
+ size 44904550397
vatex_download.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import subprocess
3
+ import argparse
4
+ import logging
5
+ import sys
6
+ import os
7
+ import multiprocessing
8
+ import glob # Import the glob module
9
+ from tqdm import tqdm
10
+
11
# --- Setup Logging ---
# Timestamp + level on every line.
# NOTE(review): stdout is used presumably to keep log lines separate from
# tqdm's progress bars (which default to stderr) — confirm intent.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    stream=sys.stdout
)
17
+
18
def download_worker(task_args):
    """Download one clipped video segment with yt-dlp.

    Executed by each process in the multiprocessing pool.

    Args:
        task_args (tuple): ``(item, output_dir)`` where ``item`` is the video
            dictionary from the JSON file and ``output_dir`` is the directory
            to save the clip into.

    Returns:
        tuple: ``(success, video_info_string)`` — ``success`` is a bool and
        ``video_info_string`` identifies the video (or a placeholder when the
        entry was malformed).
    """
    item, output_dir = task_args
    video_info_string = None  # Stays None if the entry lacks a 'videoID'

    try:
        video_info_string = item['videoID']

        # e.g. 'bjtnAh_wz1c_000002_000012' — the YouTube id may itself contain
        # underscores, so peel the two time fields off the right-hand side.
        pieces = video_info_string.split('_')
        if len(pieces) < 3:
            raise ValueError("videoID string does not have enough parts.")

        start_seconds = int(pieces[-2])   # second-to-last part: start time
        end_seconds = int(pieces[-1])     # last part: end time
        video_id = "_".join(pieces[:-2])  # everything else is the video id
    except (KeyError, ValueError) as exc:
        logging.warning(f"Skipping entry due to malformed data: {item}. Error: {exc}")
        # Fall back to a placeholder when the id could not even be read.
        return False, (video_info_string or f"malformed_data:_{item}")

    # yt-dlp substitutes %(ext)s with the actual container extension.
    clip_template = os.path.join(output_dir, f"{video_info_string}.%(ext)s")
    watch_url = f"https://www.youtube.com/watch?v={video_id}"

    yt_dlp_cmd = [
        'yt-dlp',
        '--quiet', '--no-warnings',
        '-o', clip_template,
        '--download-sections', f"*{start_seconds}-{end_seconds}",
        '--force-keyframes-at-cuts',
        '--remux-video', 'mp4',
        watch_url
    ]

    try:
        # Capture output so stderr is available for the failure log message.
        subprocess.run(yt_dlp_cmd, check=True, capture_output=True, text=True)
    except FileNotFoundError:
        logging.error("CRITICAL: 'yt-dlp' command not found. Please ensure it's installed and in your PATH.")
        # Critical for every worker, but report failure only for this task.
        return False, video_info_string
    except subprocess.CalledProcessError as exc:
        logging.error(f"Failed to download {video_info_string}. Reason: {exc.stderr.strip()}")
        return False, video_info_string
    except Exception as exc:
        logging.error(f"An unexpected error occurred for {video_info_string}: {exc}")
        return False, video_info_string
    return True, video_info_string
84
+
85
def process_downloads(json_file_path, output_dir, num_jobs):
    """
    Orchestrates the parallel downloading of video segments, skipping existing files.

    Args:
        json_file_path (str): The path to the input JSON file.
        output_dir (str): The directory to save downloaded videos.
        num_jobs (int): The number of parallel processes to use.

    Exits the process (status 1) when the JSON file is missing or invalid.
    """
    try:
        with open(json_file_path, 'r', encoding='utf-8') as f:
            video_data = json.load(f)
        logging.info(f"Loaded {len(video_data)} video entries from '{json_file_path}'.")
    except FileNotFoundError:
        logging.error(f"Error: The file '{json_file_path}' was not found.")
        sys.exit(1)
    except json.JSONDecodeError:
        logging.error(f"Error: Failed to decode JSON from '{json_file_path}'. Check format.")
        sys.exit(1)

    os.makedirs(output_dir, exist_ok=True)
    logging.info(f"Output directory set to '{output_dir}'.")

    # Pre-filter tasks so already-downloaded clips are skipped (resumability).
    tasks_to_run = []
    skipped_count = 0
    logging.info("Checking for existing files to skip...")

    for item in tqdm(video_data, desc="Scanning for existing files"):
        try:
            video_info_string = item['videoID']
            # Match the clip's filename with any extension.
            file_pattern = os.path.join(output_dir, f"{video_info_string}.*")
            if glob.glob(file_pattern):
                skipped_count += 1
            else:
                tasks_to_run.append((item, output_dir))
        except KeyError:
            # Malformed entries are passed through; the worker logs and skips them.
            tasks_to_run.append((item, output_dir))

    if not tasks_to_run:
        logging.info("All video files already exist. Nothing to download.")
        logging.info(f"Total files skipped: {skipped_count}")
        return

    logging.info(f"Found {skipped_count} existing files. Queuing {len(tasks_to_run)} new downloads.")
    logging.info(f"Starting downloads with {num_jobs} parallel jobs...")

    success_count = 0
    failure_count = 0

    with multiprocessing.Pool(processes=num_jobs) as pool:
        # FIX: iterate the imap_unordered iterator directly with tqdm(total=...)
        # instead of calling next() inside a range() loop — the manual pairing
        # could raise a stray StopIteration if counter and iterator ever
        # disagreed, and it left the returned video id unused.
        for success, _video_info in tqdm(
            pool.imap_unordered(download_worker, tasks_to_run),
            total=len(tasks_to_run),
            desc="Downloading Videos",
        ):
            if success:
                success_count += 1
            else:
                failure_count += 1

    logging.info("--- Download Summary ---")
    logging.info(f"Successfully downloaded: {success_count}")
    logging.info(f"Skipped (already exist): {skipped_count}")
    logging.info(f"Failed to download: {failure_count}")
    logging.info("------------------------")
154
+
155
def main():
    """Parse command-line arguments, validate them, and start the downloads."""
    parser = argparse.ArgumentParser(
        description="Download video segments in parallel from a JSON file using yt-dlp. Skips existing files.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "json_file", type=str,
        help="Path to the JSON file containing video information.",
    )
    parser.add_argument(
        "-o", "--output-dir", type=str, default="video_downloads",
        help="Directory to save downloaded videos. (Default: 'video_downloads')",
    )
    parser.add_argument(
        "-j", "--jobs", type=int, default=4,
        help="Number of parallel download jobs to run. (Default: 4)",
    )
    args = parser.parse_args()

    # Reject zero or negative job counts before spawning a pool.
    if args.jobs < 1:
        logging.error("Number of jobs must be a positive integer.")
        sys.exit(1)

    process_downloads(args.json_file, args.output_dir, args.jobs)


if __name__ == "__main__":
    main()
vatex_monitor_downloads.py ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import curses
2
+ import json
3
+ import time
4
+ import sys
5
+ import os
6
+ import glob
7
+ import argparse
8
+ from datetime import datetime
9
+
10
# --- Configuration ---
BAR_CHAR = "#"  # Simpler character that works in all terminals
EMPTY_BAR_CHAR = "-"  # Fills the unfinished portion of the progress bar
REFRESH_INTERVAL_SECONDS = 1  # Redraw period; also the getch() timeout below
14
+
15
def format_time(seconds):
    """Render a duration in seconds as a short string: s, m, or h."""
    # Pick the first unit whose upper bound the duration falls under.
    for upper_bound, divisor, suffix in ((60, 1, "s"), (3600, 60, "m")):
        if seconds < upper_bound:
            return f"{seconds / divisor:.1f}{suffix}"
    return f"{seconds / 3600:.1f}h"
25
+
26
def analyze_progress(json_file_path, output_dir, last_stats=None):
    """
    Analyzes the JSON file and output directory to calculate progress.

    Args:
        json_file_path (str): JSON file listing the expected videos.
        output_dir (str): Directory where downloaded clips appear.
        last_stats (dict | None): Snapshot from the previous poll with keys
            'completed' and 'timestamp'; used to estimate download rate/ETA.

    Returns:
        tuple: (total_videos, completed_videos, pending_videos, recent_files,
        download_rate, eta, file_sizes). All zeros / empties when the JSON
        cannot be read.

    NOTE(review): a clip that is still being written also matches the glob,
    so 'completed' may briefly over-count in-flight downloads — confirm
    against the downloader's output naming.
    """
    start_time = time.time()

    # 1. Get the set of all expected video IDs from the JSON file
    try:
        with open(json_file_path, 'r', encoding='utf-8') as f:
            video_data = json.load(f)
        # Map videoID -> title so the recent-files list can show titles.
        expected_video_ids = {item['videoID']: item.get('title', 'Unknown')
                              for item in video_data if 'videoID' in item}
        total_videos = len(expected_video_ids)
    except (FileNotFoundError, json.JSONDecodeError):
        # Monitor keeps running; the caller shows a "waiting" message.
        return 0, 0, 0, [], 0, 0, {}

    # 2. Get the set of all completed video IDs from the output directory
    completed_video_ids = set()
    completed_files_with_info = []
    file_sizes = {}

    try:
        for filepath in glob.glob(os.path.join(output_dir, '*.*')):
            basename = os.path.basename(filepath)
            # Filename without extension is the videoID (downloader convention).
            video_id = os.path.splitext(basename)[0]

            if video_id in expected_video_ids:
                completed_video_ids.add(video_id)
                try:
                    mtime = os.path.getmtime(filepath)
                    size = os.path.getsize(filepath)
                    title = expected_video_ids.get(video_id, 'Unknown')
                    completed_files_with_info.append((basename, mtime, size, title, video_id))
                    file_sizes[video_id] = size
                except FileNotFoundError:
                    # File vanished between glob and stat (e.g. being remuxed).
                    continue
    except FileNotFoundError:
        pass

    # 3. Sort recent files by modification time (newest first)
    completed_files_with_info.sort(key=lambda x: x[1], reverse=True)
    recent_files = completed_files_with_info[:10]  # Show up to 10 recent files

    # 4. Calculate stats
    completed_videos = len(completed_video_ids)
    pending_videos = total_videos - completed_videos

    # 5. Calculate download rate and ETA from the delta vs the previous poll
    download_rate = 0
    eta = 0

    if last_stats and 'completed' in last_stats and last_stats['completed'] < completed_videos:
        time_diff = time.time() - last_stats.get('timestamp', start_time)
        if time_diff > 0:
            download_rate = (completed_videos - last_stats['completed']) / time_diff * 60  # videos per minute
            if download_rate > 0 and pending_videos > 0:
                eta = pending_videos / download_rate * 60  # seconds remaining

    return total_videos, completed_videos, pending_videos, recent_files, download_rate, eta, file_sizes
85
+
86
def draw_progress_bar(stdscr, y, x, width, label, percentage, color_pair):
    """Draw `label`, a filled/empty bar, then ' [xx.x%]' on row y.

    The label is drawn with color pair 3 (cyan, per main's palette), the bar
    with the supplied `color_pair`. Silently does nothing when the row is
    off-screen or `width` leaves no room for the bar.
    """
    if width <= 0 or y >= stdscr.getmaxyx()[0]:
        return

    # Reserve space for label, percentage, and brackets
    label_width = len(label)
    percent_str = f" [{percentage:5.1f}%]"
    bar_area_width = width - label_width - len(percent_str)

    if bar_area_width <= 0:
        return

    filled_len = int(bar_area_width * percentage / 100)
    bar = BAR_CHAR * filled_len + EMPTY_BAR_CHAR * (bar_area_width - filled_len)

    try:
        stdscr.addstr(y, x, label, curses.color_pair(3))
        stdscr.addstr(y, x + label_width, bar, color_pair)
        stdscr.addstr(y, x + label_width + bar_area_width, percent_str)
    except curses.error:
        # curses raises when writing touches the bottom-right cell; ignore.
        pass
109
+
110
def format_size(size_bytes):
    """Render a byte count with B / KB / MB / GB units."""
    kib = 1024
    mib = kib * 1024
    gib = mib * 1024
    if size_bytes < kib:
        return f"{size_bytes} B"
    if size_bytes < mib:
        return f"{size_bytes/1024:.1f} KB"
    if size_bytes < gib:
        return f"{size_bytes/(1024*1024):.1f} MB"
    # Two decimals for GB, where a tenth is already a lot of data.
    return f"{size_bytes/(1024*1024*1024):.2f} GB"
120
+
121
def safe_addstr(stdscr, y, x, text, attr=0):
    """Write `text` at (y, x), clipping to the screen and never raising."""
    height, width = stdscr.getmaxyx()

    # Bail out when the anchor cell itself is off-screen.
    if not (0 <= y < height and 0 <= x < width):
        return

    room = width - x
    if room <= 0:
        return

    try:
        # Truncate so the string never runs past the right edge.
        stdscr.addstr(y, x, text[:room], attr)
    except curses.error:
        # Swallow any residual drawing error (e.g. bottom-right cell).
        pass
139
+
140
def main(stdscr, json_path, output_dir):
    """Curses UI loop: periodically re-scan progress and redraw the dashboard.

    Returns when the user presses 'q'; 'r' forces an immediate redraw.
    `json_path` / `output_dir` are passed straight through to analyze_progress.
    """
    # --- One-time curses configuration ---
    curses.curs_set(0)  # hide the cursor
    curses.use_default_colors()
    # Color pairs: 1=success/bar, 2=normal text, 3=labels, 4=error/warning, 5=info.
    for pair_id, fg in (
        (1, curses.COLOR_GREEN),
        (2, curses.COLOR_WHITE),
        (3, curses.COLOR_CYAN),
        (4, curses.COLOR_RED),
        (5, curses.COLOR_YELLOW),
    ):
        curses.init_pair(pair_id, fg, -1)

    last_stats = None  # previous sample, used to estimate the download rate
    start_time = time.time()

    while True:
        stdscr.erase()
        height, width = stdscr.getmaxyx()

        # Re-scan the JSON and the output directory for fresh numbers.
        (total, completed, pending, recent_files,
         download_rate, eta, file_sizes) = analyze_progress(json_path, output_dir, last_stats)

        # Remember this sample so the next pass can compute a rate delta.
        last_stats = {'completed': completed, 'timestamp': time.time()}

        total_size = sum(file_sizes.values())

        # --- Header ---
        safe_addstr(stdscr, 0, 2, f"Download Monitor - {os.path.basename(json_path)}", curses.A_BOLD)
        safe_addstr(stdscr, 1, 2, f"Output: {os.path.basename(output_dir)}", curses.A_DIM)
        safe_addstr(stdscr, 2, 2, "Press 'q' to quit, 'r' to refresh", curses.A_DIM)
        try:
            stdscr.hline(3, 0, "-", width)
        except curses.error:
            pass

        row = 5

        # --- Statistics section ---
        if total == 0:
            safe_addstr(stdscr, row, 4, "Waiting for data or JSON not found...",
                        curses.color_pair(4) | curses.A_BOLD)
            row += 2
        else:
            safe_addstr(stdscr, row, 4, "Download Statistics",
                        curses.color_pair(3) | curses.A_BOLD)
            row += 2

            # Left column.
            safe_addstr(stdscr, row, 4, f"Total Videos: {total}", curses.color_pair(2))
            safe_addstr(stdscr, row + 1, 4, f"Completed: {completed}", curses.color_pair(1))
            safe_addstr(stdscr, row + 2, 4, f"Pending: {pending}", curses.color_pair(5))

            # Right column, only when the terminal is wide enough.
            if width > 50:
                right = width // 2
                safe_addstr(stdscr, row, right,
                            f"Total Size: {format_size(total_size)}", curses.color_pair(2))
                safe_addstr(stdscr, row + 1, right,
                            f"Rate: {download_rate:.1f} videos/min", curses.color_pair(1))
                if eta > 0:
                    safe_addstr(stdscr, row + 2, right,
                                f"ETA: {format_time(eta)}", curses.color_pair(5))
                else:
                    safe_addstr(stdscr, row + 2, right,
                                "ETA: calculating...", curses.color_pair(5))

            row += 4

            # Overall progress bar.
            pct = (completed / total) * 100 if total > 0 else 0
            draw_progress_bar(stdscr, row, 4, width - 8, "Progress: ", pct, curses.color_pair(1))
            row += 2

            # How long this monitor has been running.
            safe_addstr(stdscr, row, 4,
                        f"Runtime: {format_time(time.time() - start_time)}", curses.color_pair(2))
            row += 2

        # Separator between stats and the file list.
        try:
            stdscr.hline(row, 0, "-", width)
        except curses.error:
            pass
        row += 1

        # --- Recently finished downloads ---
        safe_addstr(stdscr, row, 2, "Recently Completed Files:",
                    curses.color_pair(3) | curses.A_BOLD)
        row += 2

        if recent_files:
            for filename, mtime, size, title, _video_id in recent_files:
                if row >= height - 1:
                    break  # no rows left on screen

                stamp = datetime.fromtimestamp(mtime).strftime("%H:%M:%S")
                # Prefer the video title when known; fall back to the filename.
                shown = title if title != 'Unknown' else filename

                # Truncate long names so the size/time suffix still fits.
                limit = width - 25 if width > 25 else 10
                if len(shown) > limit:
                    shown = shown[:limit - 3] + "..."

                safe_addstr(stdscr, row, 4,
                            f"{shown} ({format_size(size)}, {stamp})", curses.color_pair(2))
                row += 1
        else:
            safe_addstr(stdscr, row, 4, "(No completed files found yet)", curses.color_pair(4))

        stdscr.refresh()

        # Block for up to one refresh interval waiting for a keypress.
        stdscr.timeout(REFRESH_INTERVAL_SECONDS * 1000)
        key = stdscr.getch()
        if key == ord('q'):
            break
        elif key == ord('r'):
            continue  # redraw immediately
270
+
271
if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser(
        description="A curses-based monitor for the video downloader script.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    arg_parser.add_argument(
        "json_file",
        type=str,
        help="Path to the JSON file containing video information (the same one used by the downloader).",
    )
    arg_parser.add_argument(
        "output_dir",
        type=str,
        help="Path to the output directory where videos are being saved.",
    )
    cli_args = arg_parser.parse_args()

    # Announce what we are watching before the curses UI takes over the terminal.
    print("Starting download monitor...")
    print(f" - Watching JSON: {cli_args.json_file}")
    print(f" - Watching Dir: {cli_args.output_dir}")
    print("\nPress 'q' in the monitor window to exit.")
    time.sleep(1)

    try:
        # curses.wrapper restores the terminal state even if main() raises.
        curses.wrapper(main, cli_args.json_file, cli_args.output_dir)
    except curses.error as e:
        print(f"\nCurses error: {e}")
        print("Your terminal window might be too small to run the monitor.")
    except KeyboardInterrupt:
        print("\nMonitor stopped.")
vatex_training_v1.0.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a3b5f08e354d9543ef4f1ab004f9db9bc4e5da49d9692f0c8c6aa3bef9751c4
3
+ size 57319458