codelion committed on
Commit
314657b
1 Parent(s): 0daa810

Upload 2 files

Files changed (2)
  1. _script_for_eval.py +231 -0
  2. _script_for_gen.py +132 -0
_script_for_eval.py ADDED
@@ -0,0 +1,231 @@
import os
import argparse
import json
import numpy as np
from tqdm import tqdm
import nltk
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from rouge import Rouge
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import re
from textstat import flesch_reading_ease
from datasets import load_dataset
import openai
from datetime import datetime

# Tokenizer models required by nltk.word_tokenize and the POS tagger.
nltk.download('punkt', quiet=True)
nltk.download('averaged_perceptron_tagger', quiet=True)

def preprocess(text):
    """Lowercase and tokenize text for BLEU scoring."""
    return nltk.word_tokenize(text.lower())

def calculate_bleu(reference, candidate):
    reference_tokens = preprocess(reference)
    candidate_tokens = preprocess(candidate)
    smoothie = SmoothingFunction().method1
    return sentence_bleu([reference_tokens], candidate_tokens, smoothing_function=smoothie)

def calculate_rouge(reference, candidate):
    rouge = Rouge()
    scores = rouge.get_scores(candidate, reference)
    return {
        'rouge-1': scores[0]['rouge-1']['f'],
        'rouge-2': scores[0]['rouge-2']['f'],
        'rouge-l': scores[0]['rouge-l']['f']
    }

def calculate_cosine_similarity(reference, candidate):
    # TF-IDF cosine similarity between the two documents.
    vectorizer = TfidfVectorizer()
    tfidf_matrix = vectorizer.fit_transform([reference, candidate])
    return cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]

def extract_sections(readme):
    """Split a README into sections, using markdown headings as boundaries."""
    sections = []
    current_section = ""
    for line in readme.split('\n'):
        if line.strip().startswith('#'):
            if current_section:
                sections.append(current_section.strip())
            current_section = line + "\n"
        else:
            current_section += line + "\n"
    if current_section:
        sections.append(current_section.strip())
    return sections

def calculate_structural_similarity(reference, candidate):
    ref_sections = extract_sections(reference)
    cand_sections = extract_sections(candidate)

    section_diff = abs(len(ref_sections) - len(cand_sections))

    ref_titles = [s.split('\n')[0] for s in ref_sections]
    cand_titles = [s.split('\n')[0] for s in cand_sections]
    title_similarity = len(set(ref_titles) & set(cand_titles)) / max(len(ref_titles), len(cand_titles))

    return {
        'section_difference': section_diff,
        'title_similarity': title_similarity
    }

def information_retrieval_score(readme):
    # Fraction of key README sections that are mentioned at all.
    key_sections = ['installation', 'usage', 'api', 'example', 'license']
    found_sections = sum(1 for section in key_sections if section in readme.lower())
    return found_sections / len(key_sections)

def code_readme_consistency(repo_content, readme):
    # Fraction of function/class names from the source that the README mentions.
    code_elements = set(re.findall(r'def\s+(\w+)', repo_content) +
                        re.findall(r'class\s+(\w+)', repo_content))

    mentioned_elements = sum(1 for element in code_elements if element in readme)

    return mentioned_elements / len(code_elements) if code_elements else 0

def calculate_readability(text):
    # Flesch reading ease, rescaled to roughly 0-1.
    return flesch_reading_ease(text) / 100

def evaluate_readme(reference_readme, generated_readme, repo_content):
    bleu_score = calculate_bleu(reference_readme, generated_readme)
    rouge_scores = calculate_rouge(reference_readme, generated_readme)
    cosine_sim = calculate_cosine_similarity(reference_readme, generated_readme)
    structural_sim = calculate_structural_similarity(reference_readme, generated_readme)
    info_retrieval = information_retrieval_score(generated_readme)
    code_consistency = code_readme_consistency(repo_content, generated_readme)
    readability = calculate_readability(generated_readme)

    # Weights sum to 1.0; information retrieval and code consistency carry slightly more weight.
    weights = {
        'bleu': 0.1,
        'rouge-1': 0.1,
        'rouge-2': 0.1,
        'rouge-l': 0.1,
        'cosine_similarity': 0.1,
        'structural_similarity': 0.1,
        'information_retrieval': 0.15,
        'code_consistency': 0.15,
        'readability': 0.1
    }

    weighted_score = (
        weights['bleu'] * bleu_score +
        weights['rouge-1'] * rouge_scores['rouge-1'] +
        weights['rouge-2'] * rouge_scores['rouge-2'] +
        weights['rouge-l'] * rouge_scores['rouge-l'] +
        weights['cosine_similarity'] * cosine_sim +
        weights['structural_similarity'] * structural_sim['title_similarity'] +
        weights['information_retrieval'] * info_retrieval +
        weights['code_consistency'] * code_consistency +
        weights['readability'] * readability
    )

    return {
        'bleu': bleu_score,
        'rouge': rouge_scores,
        'cosine_similarity': cosine_sim,
        'structural_similarity': structural_sim,
        'information_retrieval': info_retrieval,
        'code_consistency': code_consistency,
        'readability': readability,
        'weighted_score': weighted_score
    }

def generate_readme(repo_content, model, client):
    system_prompt = """You are an AI assistant tasked with creating a README.md file for a GitHub repository.
Your response should contain ONLY the content of the README.md file, without any additional explanations or markdown code blocks.
The README should include the following sections:
1. Project Title
2. Description
3. Installation
4. Usage
5. Features
6. Contributing
7. License
Ensure that your response is well-structured, informative, and directly usable as a README.md file."""

    user_prompt = f"Here is the content of the repository:\n\n{repo_content}\n\nBased on this content, please generate a README.md file."

    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]
    )

    return response.choices[0].message.content

def main(args):
    openai.api_key = os.getenv("OPENAI_API_KEY")
    if not openai.api_key:
        raise ValueError("OPENAI_API_KEY environment variable is not set")

    client = openai.OpenAI(base_url=args.base_url) if args.base_url else openai.OpenAI()

    dataset = load_dataset("patched-codes/generate-readme-eval")

    results = []

    for item in tqdm(dataset['test'], desc="Processing repos"):
        try:
            generated_readme = generate_readme(item['repo_content'], args.model, client)
            eval_result = evaluate_readme(item['repo_readme'], generated_readme, item['repo_content'])
            # Add repo_name to the eval_result
            eval_result['repo_name'] = item['repo_name']
            results.append(eval_result)
        except Exception as e:
            print(f"Error processing repo {item['repo_name']}: {e}")
            continue

    average_scores = {
        'bleu': np.mean([r['bleu'] for r in results]),
        'rouge-1': np.mean([r['rouge']['rouge-1'] for r in results]),
        'rouge-2': np.mean([r['rouge']['rouge-2'] for r in results]),
        'rouge-l': np.mean([r['rouge']['rouge-l'] for r in results]),
        'cosine_similarity': np.mean([r['cosine_similarity'] for r in results]),
        'title_similarity': np.mean([r['structural_similarity']['title_similarity'] for r in results]),
        'information_retrieval': np.mean([r['information_retrieval'] for r in results]),
        'code_consistency': np.mean([r['code_consistency'] for r in results]),
        'readability': np.mean([r['readability'] for r in results]),
        'weighted_score': np.mean([r['weighted_score'] for r in results])
    }

    # Print results to console
    print("\nEvaluation Results:")
    for metric, score in average_scores.items():
        print(f"{metric}: {score:.4f}")

    # Save results to log file
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    log_filename = f"{args.model}_results_{timestamp}.log"

    with open(log_filename, 'w') as log_file:
        log_file.write(f"Evaluation Results for model: {args.model}\n")
        log_file.write(f"Timestamp: {timestamp}\n\n")
        log_file.write("Average Scores:\n")
        for metric, score in average_scores.items():
            log_file.write(f"{metric}: {score:.4f}\n")

        log_file.write(f"\nDetailed Results:\n")
        for result in results:
            log_file.write(f"\nRepository: {result['repo_name']}\n")
            log_file.write("Scores:\n")
            log_file.write(f"  BLEU: {result['bleu']:.4f}\n")
            log_file.write(f"  ROUGE-1: {result['rouge']['rouge-1']:.4f}\n")
            log_file.write(f"  ROUGE-2: {result['rouge']['rouge-2']:.4f}\n")
            log_file.write(f"  ROUGE-L: {result['rouge']['rouge-l']:.4f}\n")
            log_file.write(f"  Cosine Similarity: {result['cosine_similarity']:.4f}\n")
            log_file.write(f"  Title Similarity: {result['structural_similarity']['title_similarity']:.4f}\n")
            log_file.write(f"  Information Retrieval: {result['information_retrieval']:.4f}\n")
            log_file.write(f"  Code Consistency: {result['code_consistency']:.4f}\n")
            log_file.write(f"  Readability: {result['readability']:.4f}\n")
            log_file.write(f"  Weighted Score: {result['weighted_score']:.4f}\n")

    print(f"\nResults saved to {log_filename}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Generate and evaluate README files using OpenAI API")
    parser.add_argument("model", help="OpenAI model to use")
    parser.add_argument("--base_url", help="Optional base URL for OpenAI API", default=None)
    args = parser.parse_args()

    main(args)
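
With OPENAI_API_KEY set, the script is run as python _script_for_eval.py MODEL [--base_url URL] and writes a timestamped .log file alongside the console summary. The scoring half can also be exercised on its own without calling the OpenAI API; the sketch below is a minimal, hypothetical example (the README and repository strings are made-up stand-ins) and assumes _script_for_eval.py is importable from the working directory:

# Minimal sketch (not part of the upload): score one generated README against a
# reference without any API calls. Assumes _script_for_eval.py is importable;
# the strings below are made-up stand-ins for real data.
from _script_for_eval import evaluate_readme

reference_readme = "# demo\n\n## Installation\npip install demo\n\n## Usage\nimport demo\n"
generated_readme = "# demo\n\n## Installation\npip install demo\n\n## License\nMIT\n"
repo_content = "File: demo.py\n\ndef run():\n    return 42\n"

scores = evaluate_readme(reference_readme, generated_readme, repo_content)
print(scores['weighted_score'])  # single 0-1 aggregate over all metrics
print(scores['rouge'])           # per-variant ROUGE F1 scores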
_script_for_gen.py ADDED
@@ -0,0 +1,132 @@
import os
import subprocess
import json
import logging
from github import Github
import tiktoken
from pathlib import Path
import shutil

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Initialize GitHub client
github_token = os.getenv('GITHUB_TOKEN')
if not github_token:
    raise ValueError("GITHUB_TOKEN environment variable is not set")
g = Github(github_token)

# Initialize tokenizer
tokenizer = tiktoken.get_encoding("cl100k_base")

def clone_repo(repo_url, repo_name):
    # Shallow-clone the repository into /tmp.
    tmp_dir = f"/tmp/{repo_name}"
    subprocess.run(["git", "clone", "--depth", "1", repo_url, tmp_dir], check=True)
    return tmp_dir

def get_repo_content(repo_dir):
    # Concatenate all Python files, skipping test and example directories.
    content = []
    for root, dirs, files in os.walk(repo_dir):
        if "test" in root.lower() or "example" in root.lower():
            continue
        for file in files:
            if file.endswith('.py'):
                file_path = os.path.join(root, file)
                relative_path = os.path.relpath(file_path, repo_dir)
                with open(file_path, 'r') as f:
                    file_content = f.read()
                content.append(f"File: {relative_path}\n\n{file_content}\n\n")
    return "\n".join(content)

def count_tokens(text):
    return len(tokenizer.encode(text))

def process_repo(repo):
    repo_name = repo.name
    repo_url = repo.clone_url
    logging.info(f"Processing repository: {repo_name}")

    try:
        tmp_dir = clone_repo(repo_url, repo_name)
        readme_path = os.path.join(tmp_dir, "README.md")

        if not os.path.exists(readme_path):
            logging.info(f"README.md not found in {repo_name}")
            return None

        repo_content = get_repo_content(tmp_dir)
        if count_tokens(repo_content) >= 100000:
            logging.info(f"Repository {repo_name} content exceeds 100k tokens")
            return None

        with open(readme_path, 'r') as f:
            readme_content = f.read()

        repo_commit = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=tmp_dir).decode().strip()

        return {
            "repo_name": repo_name,
            "repo_commit": repo_commit,
            "repo_content": repo_content,
            "repo_readme": readme_content
        }
    except Exception as e:
        logging.error(f"Error processing repository {repo_name}: {str(e)}")
        return None
    finally:
        # Clean up the clone even if processing failed.
        if 'tmp_dir' in locals():
            shutil.rmtree(tmp_dir)

def load_existing_data(filename):
    # Read previously collected records, keyed by repo_name, for incremental updates.
    existing_data = {}
    if os.path.exists(filename):
        with open(filename, "r") as f:
            for line in f:
                item = json.loads(line)
                existing_data[item['repo_name']] = item
    return existing_data

def save_dataset(filename, dataset, mode='a'):
    with open(filename, mode) as f:
        for item in dataset:
            json.dump(item, f)
            f.write("\n")

def main():
    filename = "generate-readme-eval.jsonl"
    existing_data = load_existing_data(filename)

    new_dataset = []
    updated_count = 0
    skipped_count = 0

    repos = g.search_repositories(query="language:python stars:>1000 forks:>100", sort="stars", order="desc")

    for i, repo in enumerate(repos[200:400]):
        # existing_data is keyed by repo.name (see process_repo), so look it up with the same key.
        if repo.name in existing_data:
            existing_item = existing_data[repo.name]
            if existing_item['repo_commit'] == repo.get_commits()[0].sha:
                skipped_count += 1
                logging.info(f"Skipped {repo.full_name}: Already processed with same commit")
                continue
            else:
                logging.info(f"Updating {repo.full_name}: Commit changed")
                updated_count += 1

        item = process_repo(repo)
        if item:
            new_dataset.append(item)

        if i % 10 == 0:
            logging.info(f"Processed {i+1} repositories")

    # Append new and updated items to the file
    save_dataset(filename, new_dataset, mode='a')

    logging.info(f"Dataset updated with {len(new_dataset)} new/updated items")
    logging.info(f"Skipped {skipped_count} repositories (no changes)")
    logging.info(f"Updated {updated_count} repositories")

if __name__ == "__main__":
    main()
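
With GITHUB_TOKEN set, running python _script_for_gen.py appends one JSON object per processed repository to generate-readme-eval.jsonl. The sketch below is a small, hypothetical example of inspecting that output; the field names are taken from process_repo above, and the token count mirrors the script's own cl100k_base check:

# Sketch (not part of the upload): read back the JSONL written by _script_for_gen.py.
# Fields repo_name, repo_commit, repo_content, repo_readme come from process_repo.
import json
import tiktoken

tokenizer = tiktoken.get_encoding("cl100k_base")

with open("generate-readme-eval.jsonl") as f:
    for line in f:
        item = json.loads(line)
        n_tokens = len(tokenizer.encode(item["repo_content"]))
        print(f"{item['repo_name']} @ {item['repo_commit'][:7]}: {n_tokens} tokens")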