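"""Convert Reddit data dumps (zstd-compressed NDJSON, e.g. Pushshift archives)
into compact msgspec models defined in RedditModels.

Exposes two Typer commands, `submissions` and `comments`, each fanning a
directory of dump files out across a multiprocessing pool and writing one
NDJSON output file per input.
"""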
import html
import multiprocessing
import pathlib
from typing import Callable
import msgspec
import typer
import zstandard
from loguru import logger
from RedditModels import (
RedditAuthor,
RedditComment,
RedditFlair,
RedditSubmit,
RedditSubreddit,
)

root_app = typer.Typer()

def read_and_decode(
reader, chunk_size, max_window_size, previous_chunk=None, bytes_read=0
):
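    """Read from a zstd stream reader until the buffer decodes as UTF-8.

    Multi-byte UTF-8 sequences can straddle chunk boundaries, so on a decode
    error the partial chunk is kept and another chunk is read recursively,
    raising once more than `max_window_size` bytes have been consumed.
    """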
chunk = reader.read(chunk_size)
bytes_read += chunk_size
if previous_chunk is not None:
chunk = previous_chunk + chunk
try:
return chunk.decode()
except UnicodeDecodeError:
if bytes_read > max_window_size:
raise UnicodeError(
f"Unable to decode frame after reading {bytes_read:,} bytes"
)
logger.debug(f"Decoding error with {bytes_read:,} bytes, reading another chunk")
return read_and_decode(reader, chunk_size, max_window_size, chunk, bytes_read)

GB = 2**30

def read_lines_jsonl(file_name, chunk_size=GB // 2):
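    """Yield (raw line bytes, byte offset) pairs from an uncompressed JSONL file."""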
with open(file_name, "rb") as file_handle:
buffer = b""
while True:
chunk = file_handle.read(chunk_size)
if not chunk:
break
lines = (buffer + chunk).split(b"\n")
for line in lines[:-1]:
yield line.strip(), file_handle.tell()
            buffer = lines[-1]
        # Flush a final record that lacks a trailing newline.
        if buffer:
            yield buffer.strip(), file_handle.tell()

def read_lines_zst(file_name, scale: float = 1):
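    """Yield (line, compressed byte offset) pairs from a zstd NDJSON dump.

    The 2**31 window (scaled by `scale`) accommodates archives compressed
    with zstd long-distance matching.
    """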
with open(file_name, "rb") as file_handle:
buffer = ""
reader = zstandard.ZstdDecompressor(
max_window_size=int((2**31) * scale)
).stream_reader(file_handle)
while True:
chunk = read_and_decode(
reader, int((2**27) * scale), int((2**29) * 2 * scale)
)
if not chunk:
break
lines = (buffer + chunk).split("\n")
for line in lines[:-1]:
yield line.strip(), file_handle.tell()
            buffer = lines[-1]
        # Flush a final record that lacks a trailing newline.
        if buffer:
            yield buffer.strip(), file_handle.tell()
        reader.close()

def error_cb(err):
    logger.exception(err)

def get_submission_flags(data: dict):
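    """Pack truthy boolean submission fields into a compact flag string.

    Characters follow flag_map order: a row where only `stickied`, `locked`
    and `is_self` are truthy encodes as "#ls".
    """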
flag_map = {
"!": "spoiler",
"#": "stickied",
">": "pinned",
"A": "archived",
"C": "is_crosspostable",
"c": "is_original_content",
"E": "edited",
"e": "is_meta",
"G": "can_gild",
"H": "hidden",
"i": "is_robot_indexable",
"L": "allow_live_comments",
"l": "locked",
"m": "is_reddit_media_domain",
"M": "over_18",
"O": "contest_mode",
"q": "quarantine",
"s": "is_self",
"v": "is_video",
}
return "".join(flag for flag, key in flag_map.items() if data.get(key))

def get_comment_flags(data: dict):
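    """Pack truthy boolean comment fields into a compact flag string."""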
flag_map = {
"#": "stickied",
"A": "archived",
"E": "edited",
"G": "can_gild",
"H": "hidden",
"l": "locked",
"=": "score_hidden",
"P": "author_premium",
"R": "send_replies",
"O": "can_mod_post",
"N": "no_follow",
}
return "".join(flag for flag, key in flag_map.items() if data.get(key))

def get_reddit_flair(data: dict, prefix: str):
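    """Collect the `{prefix}_flair_*` fields ("author" or "link") into a RedditFlair."""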
return RedditFlair(
bg=data.get(f"{prefix}_flair_background_color"),
css_cls=data.get(f"{prefix}_flair_css_class"),
template=data.get(f"{prefix}_flair_template_id"),
richtext=data.get(f"{prefix}_flair_richtext"),
text=data.get(f"{prefix}_flair_text"),
text_color=data.get(f"{prefix}_flair_text_color"),
type=data.get(f"{prefix}_flair_type"),
)

def make_submission(data: dict, file_id: pathlib.Path):
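    """Build a RedditSubmit (including nested crossposts) from one raw dump row.

    Returns None when the row carries a null subreddit_id.
    """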
# Create Author
author = None
if data.get("author_created_utc"):
author_flair = get_reddit_flair(data, "author")
author_fullname = data.get("author_fullname", "")
afn = (
author_fullname[3:]
if author_fullname.startswith("t2_")
else author_fullname
)
author = RedditAuthor(
name=data.get("author", ""),
uid=afn,
create=data.get("author_created_utc", -1),
flair=author_flair if author_flair.is_flaired else None,
patreon=data.get("author_patreon_flair", False),
premium=data.get("author_premium", False),
)
# Create Subreddit
    subreddit_id = data.get("subreddit_id", "")
    if subreddit_id is None:
        # Rows with an explicit null subreddit_id are dropped.
        return None
    subid = subreddit_id[3:] if subreddit_id.startswith("t5_") else subreddit_id
subreddit = RedditSubreddit(
name=data.get("subreddit", "<?>"),
id=subid,
# Number of subs and type can be None.
subs=data.get("subreddit_subscribers", None),
type=data.get("subreddit_type", None),
)
link_flair = get_reddit_flair(data, "link")
submission = RedditSubmit(
sub=subreddit,
author=author,
id=data.get("id", None),
        score=data.get("score") or 0,
        # created_utc may be missing, null, or a numeric string; coerce or use -1.0.
        created=float(data["created_utc"]) if data.get("created_utc") else -1.0,
title=data.get("title", None),
flags=get_submission_flags(data),
link_flair=link_flair if link_flair.is_flaired else None,
url=data.get("url"),
text=data.get("selftext", None),
)
if submission.text == "[removed]":
submission.removed = [
data.get("removal_reason"),
data.get("removed_by"),
data.get("removed_by_category"),
]
if data.get("crosspost_parent_list", []):
submission.cross = []
for crosspost in data.get("crosspost_parent_list", []):
post = make_submission(crosspost, file_id)
if post is None:
continue
submission.cross.append(post)
return submission

def make_comment(data: dict):
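    """Build a RedditComment from one raw dump row, nulling deleted authors and bodies."""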
author = data.get("author", "")
if author is None or author.lower() == "[deleted]":
author = None
else:
author_flair = get_reddit_flair(data, "author")
author_fullname = data.get("author_fullname", "")
afn = (
author_fullname[3:]
if author_fullname.startswith("t2_")
else author_fullname
)
author = RedditAuthor(
name=data.get("author", ""),
uid=afn,
create=data.get("author_created_utc", -1),
flair=author_flair if author_flair.is_flaired else None,
patreon=data.get("author_patreon_flair", False),
premium=data.get("author_premium", False),
)
    subreddit_id = data.get("subreddit_id", "")
    if subreddit_id is None:
        # Rows with an explicit null subreddit_id are dropped.
        return None
    subid = subreddit_id[3:] if subreddit_id.startswith("t5_") else subreddit_id
subreddit = RedditSubreddit(
name=data.get("subreddit", ""),
id=subid,
subs=data.get("subreddit_subscribers", -1),
type=data.get("subreddit_type", ""),
)
    text = html.unescape(data.get("body") or "").replace("\r\n", "\n")
if author is None and text in ["[deleted]", "[removed]"]:
text = None
    comment = RedditComment(
sub=subreddit,
author=author,
id=data.get("id", ""),
score=data.get("score", 0),
created=data.get("created_utc", 0),
thread_id=data.get("link_id", ""),
parent_id=data.get("parent_id", ""),
text=text,
flags=get_comment_flags(data),
)
    return comment

# Base processor
def process_zst(
input_file: pathlib.Path, output_file: pathlib.Path, processor_fn: Callable
):
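    """Stream one .zst dump through processor_fn, writing NDJSON to a per-input output file."""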
decoder = msgspec.json.Decoder()
encoder = msgspec.json.Encoder(decimal_format="number")
write = output_file.with_stem(f"{output_file.stem}_{input_file.stem}")
with open(write, "wb") as f:
        # read_lines_zst yields (line, byte_offset); the offset is unused here.
        for lineidx, (data, _offset) in enumerate(read_lines_zst(input_file)):
try:
post = processor_fn(decoder.decode(data))
if post:
f.write(encoder.encode(post) + b"\n")
            except msgspec.DecodeError:
                logger.warning(f"Decode error at line {lineidx:,} of {input_file.name}; continuing.")
if lineidx % 100_000 == 0:
logger.info(f"{write} {lineidx} processed")
logger.info(f"{write} done")

# Typer Commands
@root_app.command(name="submissions")
def process_submissions(zst_input: pathlib.Path, output_prefix: pathlib.Path):
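    """Convert every submissions dump in zst_input using a 32-process pool."""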
    s = sorted(zst_input.iterdir())
with multiprocessing.Pool(processes=32) as pooled:
results = []
for file in s:
results.append(
pooled.apply_async(
process_zst,
args=(file, output_prefix, make_submission),
error_callback=error_cb,
)
)
        for result in results:
            result.wait()

@root_app.command(name="comments")
def process_comments(zst_input: pathlib.Path, output_prefix: pathlib.Path):
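    """Convert every comments dump in zst_input using a 32-process pool."""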
    s = sorted(zst_input.iterdir())
with multiprocessing.Pool(processes=32) as pooled:
results = []
for file in s:
results.append(
pooled.apply_async(
process_zst,
                    args=(file, output_prefix, make_comment),
error_callback=error_cb,
)
)
        for result in results:
            result.wait()

# Typer Commands for filtering with `sub_selects.jsonl`
if __name__ == "__main__":
root_app()
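
# Example invocations (the script filename and paths below are hypothetical):
#   python process_dumps.py submissions reddit/submissions out/posts.jsonl
#   python process_dumps.py comments reddit/comments out/comments.jsonl
# With an input like RS_2023-01.zst, a worker writes out/posts_RS_2023-01.jsonl.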