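"""Build the generate-readme-eval dataset.

Clones popular Python repositories from GitHub, collects their .py sources
and README.md, and appends the results as JSON lines to
generate-readme-eval.jsonl.
"""
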
import os
import subprocess
import json
import logging
from github import Github
import tiktoken
import shutil
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Initialize GitHub client
github_token = os.getenv('GITHUB_TOKEN')
if not github_token:
    raise ValueError("GITHUB_TOKEN environment variable is not set")
g = Github(github_token)
# Initialize tokenizer
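# cl100k_base is the encoding used by OpenAI's GPT-3.5/GPT-4 models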
tokenizer = tiktoken.get_encoding("cl100k_base")
def clone_repo(repo_url, repo_name):
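    """Shallow-clone repo_url into /tmp/<repo_name> and return the clone path."""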
tmp_dir = f"/tmp/{repo_name}"
subprocess.run(["git", "clone", "--depth", "1", repo_url, tmp_dir], check=True)
return tmp_dir
def get_repo_content(repo_dir):
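    """Concatenate every .py file in repo_dir (skipping test/example paths)
    into one string, each file prefixed with its path relative to the repo root."""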
    content = []
    for root, dirs, files in os.walk(repo_dir):
        # Skip test and example directories
        if "test" in root.lower() or "example" in root.lower():
            continue
        for file in files:
            if file.endswith('.py'):
                file_path = os.path.join(root, file)
                relative_path = os.path.relpath(file_path, repo_dir)
                # errors='ignore' prevents a single non-UTF-8 file from aborting the repo
                with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                    file_content = f.read()
                content.append(f"File: {relative_path}\n\n{file_content}\n\n")
    return "\n".join(content)

def count_tokens(text):
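    """Return the number of cl100k_base tokens in text."""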
    return len(tokenizer.encode(text))

def process_repo(repo):
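    """Clone a repository, gather its Python sources and README, and return a
    dataset record. Returns None if README.md is missing, the content exceeds
    the 100k-token budget, or processing fails; the clone is always removed."""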
    repo_name = repo.name
    repo_url = repo.clone_url
    logging.info(f"Processing repository: {repo_name}")
    try:
        tmp_dir = clone_repo(repo_url, repo_name)
        readme_path = os.path.join(tmp_dir, "README.md")
        if not os.path.exists(readme_path):
            logging.info(f"README.md not found in {repo_name}")
            return None
        repo_content = get_repo_content(tmp_dir)
        if count_tokens(repo_content) >= 100000:
            logging.info(f"Repository {repo_name} content exceeds 100k tokens")
            return None
        with open(readme_path, 'r', encoding='utf-8', errors='ignore') as f:
            readme_content = f.read()
        repo_commit = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=tmp_dir).decode().strip()
        return {
            "repo_name": repo_name,
            "repo_commit": repo_commit,
            "repo_content": repo_content,
            "repo_readme": readme_content
        }
    except Exception as e:
        logging.error(f"Error processing repository {repo_name}: {str(e)}")
        return None
    finally:
        # clone_repo may fail before tmp_dir is assigned, hence the locals() check
        if 'tmp_dir' in locals():
            shutil.rmtree(tmp_dir, ignore_errors=True)

def load_existing_data(filename):
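    """Load previously saved JSONL records from filename, keyed by repo_name."""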
    existing_data = {}
    if os.path.exists(filename):
        with open(filename, "r") as f:
            for line in f:
                item = json.loads(line)
                # Later lines win, so re-processed repos supersede stale entries
                existing_data[item['repo_name']] = item
    return existing_data

def save_dataset(filename, dataset, mode='a'):
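    """Write dataset items to filename as JSON lines (mode 'a' appends)."""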
    with open(filename, mode) as f:
        for item in dataset:
            json.dump(item, f)
            f.write("\n")

def main():
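    """Search GitHub for popular Python repos, take results 200-400 by stars,
    and append any new or commit-changed ones to the dataset file."""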
    # Reuse the module-level authenticated client instead of creating a second one
    filename = "generate-readme-eval.jsonl"
    existing_data = load_existing_data(filename)
    new_dataset = []
    updated_count = 0
    skipped_count = 0
    repos = g.search_repositories(query="language:python stars:>1000 forks:>100", sort="stars", order="desc")
    for i, repo in enumerate(repos[200:400]):
        # existing_data is keyed by repo.name (what process_repo stores as repo_name),
        # so look up by name rather than full_name
        if repo.name in existing_data:
            existing_item = existing_data[repo.name]
            if existing_item['repo_commit'] == repo.get_commits()[0].sha:
                skipped_count += 1
                logging.info(f"Skipped {repo.full_name}: Already processed with same commit")
                continue
            else:
                logging.info(f"Updating {repo.full_name}: Commit changed")
                updated_count += 1
        item = process_repo(repo)
        if item:
            new_dataset.append(item)
        if i % 10 == 0:
            logging.info(f"Processed {i+1} repositories")
    # Append new and updated items; load_existing_data keeps the last entry
    # per repo, so appended updates supersede stale ones on the next run
    save_dataset(filename, new_dataset, mode='a')
    logging.info(f"Dataset updated with {len(new_dataset)} new/updated items")
    logging.info(f"Skipped {skipped_count} repositories (no changes)")
    logging.info(f"Updated {updated_count} repositories")

if __name__ == "__main__":
    main()
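
# Illustrative only: one way to consume the generated dataset downstream,
# reusing helpers defined above.
#
#   dataset = load_existing_data("generate-readme-eval.jsonl")
#   for name, item in dataset.items():
#       print(name, item["repo_commit"], count_tokens(item["repo_content"]))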