Dataset Viewer (auto-converted to Parquet)
| Column | Type | Range / distinct values |
| --- | --- | --- |
| problem_id | int64 | 1 to 113 |
| programming_language | stringclasses | 2 values |
| original_code | stringlengths | 0 to 29.4k |
| highlighted_code | stringlengths | 0 to 6.05k |
| instruction | stringlengths | 5 to 5.17k |
| test_code | stringlengths | 553 to 29.5k |
| requirements | stringlengths | 18 to 122 |
| conftest | stringclasses | 3 values |
| test_utils | stringclasses | 7 values |
| split | stringclasses | 1 value |
| package_json | nullclasses | 9 values |
| jest_setup | nullclasses | 9 values |
| babel_config | nullclasses | 5 values |
| other_files | null | (no values shown) |
| jest_dom_setup | nullclasses | 1 value |
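Since the split column holds a single value, the whole table can be pulled down in one call with the Hugging Face `datasets` library. A minimal sketch, assuming a placeholder dataset ID and that the single split is named `test`:

```python
from datasets import load_dataset

# "org/dataset-name" is a placeholder; substitute the real dataset ID from the hub page.
ds = load_dataset("org/dataset-name", split="test")

row = ds[0]
print(row["problem_id"], row["programming_language"])
print(row["instruction"])
print(row["original_code"][:200])  # code columns are stored as plain strings
```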
problem_id: 1
programming_language: python
original_code:
```python
import torch.nn as nn
import torch.nn.functional as F


class SimpleConvNet3(nn.Module):
    def __init__(self):
        super(SimpleConvNet3, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1)
        self.fc1 = nn.Linear(256 * 16 * 16, 512)
        self.fc2 = nn.Linear(512, 3)  # 3 output classes

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = F.relu(self.conv3(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = F.relu(self.conv4(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = x.view(x.size(0), -1)  # Flatten the tensor
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
```
highlighted_code:
```python
class SimpleConvNet3(nn.Module):
    def __init__(self):
        super(SimpleConvNet3, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1)
        self.fc1 = nn.Linear(256 * 16 * 16, 512)
        self.fc2 = nn.Linear(512, 3)  # 3 output classes

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = F.relu(self.conv3(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = F.relu(self.conv4(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = x.view(x.size(0), -1)  # Flatten the tensor
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
```
instruction: 3. Try adding Dropout to the layers of your convolutional network, without using BatchNorm.
test_code:
```python
# test_dropout_no_batchnorm.py
import pytest
import inspect
import torch.nn as nn


def find_model_class(module):
    """Locate the first nn.Module subclass in the implementation module."""
    for _, obj in inspect.getmembers(module, inspect.isclass):
        if issubclass(obj, nn.Module) and obj is not nn.Module:
            return obj
    pytest.skip(f"{module.__name__}: no nn.Module subclass found")


def get_model_instance(module):
    """Instantiate the model class, or skip if it fails."""
    ModelCls = find_model_class(module)
    try:
        return ModelCls()
    except Exception as e:
        pytest.skip(f"{module.__name__}: cannot instantiate model: {e}")


def count_dropout_and_batchnorm(model):
    """
    Walk the model graph and count how many Dropout* and BatchNorm* layers it has.
    Returns (dropout_count, batchnorm_count).
    """
    dropouts = 0
    batchnorms = 0
    for layer in model.modules():
        if isinstance(layer, (nn.Dropout, nn.Dropout1d, nn.Dropout2d, nn.Dropout3d)):
            dropouts += 1
        if isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
            batchnorms += 1
    return dropouts, batchnorms


def test_dropout_layers_present(implementation):
    """
    Model must include at least one Dropout layer.
    """
    impl_name, module = implementation
    model = get_model_instance(module)
    dropouts, _ = count_dropout_and_batchnorm(model)
    assert dropouts > 0, (
        f"{impl_name}: found {dropouts} Dropout layers; expected at least one."
    )


def test_no_batchnorm_layers(implementation):
    """
    Model must NOT include any BatchNorm layers.
    """
    impl_name, module = implementation
    model = get_model_instance(module)
    _, batchnorms = count_dropout_and_batchnorm(model)
    assert batchnorms == 0, (
        f"{impl_name}: found {batchnorms} BatchNorm layers; remove all BatchNorm uses."
    )
```
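For orientation, a hypothetical sketch (not taken from the dataset) of the kind of edit these tests accept: Dropout modules registered on the model and no BatchNorm anywhere. The dropout probabilities are illustrative.

```python
import torch.nn as nn
import torch.nn.functional as F


class SimpleConvNet3(nn.Module):
    """Sketch of a passing edit: same architecture plus Dropout, no BatchNorm."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.dropout2d = nn.Dropout2d(p=0.25)  # registered Dropout* modules are what the tests count
        self.dropout = nn.Dropout(p=0.5)
        self.fc1 = nn.Linear(256 * 16 * 16, 512)
        self.fc2 = nn.Linear(512, 3)

    def forward(self, x):
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = F.max_pool2d(F.relu(conv(x)), kernel_size=2, stride=2)
            x = self.dropout2d(x)            # spatial dropout after each conv block
        x = x.view(x.size(0), -1)
        x = self.dropout(F.relu(self.fc1(x)))  # standard dropout before the classifier head
        return self.fc2(x)
```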
requirements: pytest pytest-mock torch numpy
conftest:
```python
import pytest
import os
import sys
import json
from typing import Dict, List, Optional, Any

# Import from local test_utils.py in the same directory
from test_utils import TestUtils, TestResultsManager

# Load all implementations in the current sandbox
implementations = TestUtils.load_all_implementations()
test_results = TestResultsManager()


@pytest.fixture(scope="session")
def sandbox_dir():
    """Fixture to provide the sandbox directory path."""
    return os.path.dirname(os.path.abspath(__file__))


@pytest.fixture(scope="session")
def sandbox_name():
    """Fixture to provide the sandbox name."""
    return os.path.basename(os.path.dirname(os.path.abspath(__file__)))


@pytest.fixture(scope="session")
def all_implementations():
    """Fixture to provide all implementations as a dictionary."""
    return implementations


@pytest.fixture(params=list(implementations.items()))
def implementation(request):
    """Fixture to provide each implementation to tests one at a time."""
    return request.param


@pytest.fixture(scope="session")
def results_manager():
    """Fixture to provide access to the test results manager."""
    return test_results


# Hook for collecting test results
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Pytest hook to collect test results."""
    # Execute all other hooks to obtain the report object
    outcome = yield
    rep = outcome.get_result()

    # We're only interested in the call outcome
    if rep.when == "call":
        if hasattr(item, "callspec") and "implementation" in item.callspec.params:
            # Get implementation name and module
            impl_name, _ = item.callspec.params["implementation"]

            # Get test name
            test_name = item.nodeid.split("::")[-1]

            # Record result
            if rep.passed:
                test_results.record_result(impl_name, test_name, True)
            elif rep.failed:
                error_msg = str(rep.longrepr) if rep.longrepr else "Test failed"
                test_results.record_result(impl_name, test_name, False, error_msg)
            elif rep.skipped:
                skip_reason = rep.longrepr[2] if rep.longrepr else "Test skipped"
                test_results.record_skip(impl_name, test_name, skip_reason)


# Hook to save results at the end of testing
@pytest.hookimpl(trylast=True)
def pytest_sessionfinish(session, exitstatus):
    """Save test results at the end of the test session."""
    test_results.save_results()
```
test_utils:
```python
import os
import sys
import glob
import re
import importlib.util
import traceback
import types
from typing import Dict, List, Optional, Any, Tuple


class TestUtils:
    @staticmethod
    def discover_implementation_files(directory: str = None) -> List[str]:
        """Find all implementation files in the current sandbox directory."""
        if directory is None:
            directory = os.path.dirname(os.path.abspath(__file__))

        patterns = [
            r'modified_code\d+\.py',
            r'new_code\d+\.py',
            r'original_code\.py',
            r'implementation\d*\.py'
        ]

        pattern = re.compile('|'.join(f'({p})' for p in patterns))
        implementations = []

        for file_path in glob.glob(os.path.join(directory, '*.py')):
            if pattern.search(os.path.basename(file_path)):
                implementations.append(file_path)

        # Sort files numerically
        def sort_key(path):
            filename = os.path.basename(path)
            match = re.search(r'(\d+)', filename)
            return int(match.group(1)) if match else 0

        return sorted(implementations, key=sort_key)

    @staticmethod
    def create_mock_module(file_path: str, module_name: str, error_info: str) -> types.ModuleType:
        """Create a mock module that contains error information but can still be tested."""
        # Create a new module object
        mock_module = types.ModuleType(module_name)

        # Add basic attributes
        mock_module.__file__ = file_path
        mock_module.__name__ = module_name
        mock_module.__display_name__ = module_name
        mock_module.__error__ = error_info

        # Add a dummy function that can be detected by test functions
        def dummy_function(*args, **kwargs):
            return f"Error in module: {error_info}"

        setattr(mock_module, "implementation_error", dummy_function)

        return mock_module

    @staticmethod
    def load_module(file_path: str, module_name: Optional[str] = None) -> Any:
        """
        Safely load a module from a file path with proper error handling.
        If the module has errors, return a mock module that can still be tested.
        """
        if module_name is None:
            module_name = os.path.basename(file_path).replace('.py', '')

        # Create a unique module name to avoid conflicts
        sandbox_id = os.path.basename(os.path.dirname(file_path))
        unique_module_name = f"{sandbox_id}_{module_name}"

        try:
            # First, try to read the file to check for syntax errors
            with open(file_path, 'r') as f:
                source_code = f.read()

            # Check for syntax errors by compiling the code
            try:
                compiled = compile(source_code, file_path, 'exec')
            except SyntaxError as e:
                error_msg = f"Syntax error: {str(e)}"
                print(f"Syntax error in {file_path}: {e}")
                print(f"  Line {e.lineno}, column {e.offset}: {e.text}")
                return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)

            # Create the module spec
            spec = importlib.util.spec_from_file_location(unique_module_name, file_path)
            if spec is None or spec.loader is None:
                error_msg = f"Could not create spec for {file_path}"
                print(f"Error: {error_msg}")
                return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)

            # Create the module object
            module = importlib.util.module_from_spec(spec)
            sys.modules[unique_module_name] = module

            # Special handling for execution errors
            try:
                # Execute the module code in a safe way
                spec.loader.exec_module(module)
                # Store the original name for reference
                module.__display_name__ = module_name
                return module
            except Exception as e:
                error_msg = f"Runtime error: {str(e)}"
                traceback_str = traceback.format_exc()
                print(f"Error executing module {file_path}: {e}")
                print(traceback_str)

                # Create a partial module that contains what we loaded before the error
                mock_module = TestUtils.create_mock_module(file_path, unique_module_name, error_msg)

                # Copy any attributes that might have been defined before the error
                for attr_name in dir(module):
                    if not attr_name.startswith('__'):
                        try:
                            setattr(mock_module, attr_name, getattr(module, attr_name))
                        except Exception:
                            pass  # Skip attributes that can't be copied

                return mock_module

        except FileNotFoundError as e:
            error_msg = f"File not found: {str(e)}"
            print(f"Error: {error_msg}")
            return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
        except Exception as e:
            error_msg = f"Unexpected error: {str(e)}"
            print(f"Error loading module {file_path}: {e}")
            return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)

    @classmethod
    def load_all_implementations(cls, directory: str = None) -> Dict[str, Any]:
        """Load all implementation files in the directory, including those with errors."""
        if directory is None:
            directory = os.path.dirname(os.path.abspath(__file__))

        implementations = {}

        implementation_files = cls.discover_implementation_files(directory)
        if not implementation_files:
            print("WARNING: No implementation files found. Check your file naming patterns.")

        for file_path in implementation_files:
            module_name = os.path.basename(file_path).replace('.py', '')
            module = cls.load_module(file_path, module_name)

            # Always add the module, even if it has errors
            implementations[module_name] = module

            if hasattr(module, '__error__'):
                print(f"Loaded with errors: {module_name} - {module.__error__}")
            else:
                print(f"Successfully loaded: {module_name}")

        return implementations


class TestResultsManager:
    def __init__(self):
        self.results = {}
        self.sandbox_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))

    def record_result(self, impl_name: str, test_name: str, passed: bool,
                      error_msg: Optional[str] = None) -> None:
        """Record a test result for an implementation."""
        if impl_name not in self.results:
            self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []}

        if passed:
            self.results[impl_name]["passed"] += 1
        else:
            self.results[impl_name]["failed"] += 1
            if error_msg:
                self.results[impl_name]["errors"].append({
                    "test": test_name,
                    "error": error_msg
                })

    def record_skip(self, impl_name: str, test_name: str,
                    reason: Optional[str] = None) -> None:
        """Record a skipped test for an implementation."""
        if impl_name not in self.results:
            self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []}

        self.results[impl_name]["skipped"] += 1
        if reason:
            self.results[impl_name]["errors"].append({
                "test": test_name,
                "error": f"SKIPPED: {reason}"
            })

    def get_winner(self) -> Tuple[Optional[int], Dict]:
        """Determine the winner based on test results."""
        winner = None
        max_passed = -1

        for impl_name, results in self.results.items():
            if impl_name == "original_code":
                continue  # Skip original code when determining winner

            if results["passed"] > max_passed:
                max_passed = results["passed"]
                winner = impl_name
            # Break ties by looking at failure count
            elif results["passed"] == max_passed and winner is not None:
                if results["failed"] < self.results[winner]["failed"]:
                    winner = impl_name

        # Convert winner to numeric index if possible
        winner_index = -1
        if winner and re.match(r'modified_code\d+', winner):
            try:
                winner_index = int(re.search(r'(\d+)', winner).group(1))
            except (AttributeError, ValueError):
                pass

        return winner_index, self.results

    def save_results(self, filename: str = "test_results.json") -> None:
        """Save test results to a JSON file."""
        import json

        winner_index, results = self.get_winner()

        # Check if all tests were skipped
        all_skipped = all(
            stats["skipped"] == stats["passed"] + stats["failed"] + stats["skipped"]
            for impl_name, stats in results.items()
            if impl_name != "original_code"
        )

        output = {
            "winner": winner_index,
            "all_skipped": all_skipped,
            "results": {
                name: {
                    "passed": stats["passed"],
                    "failed": stats["failed"],
                    "skipped": stats["skipped"],
                    "total": stats["passed"] + stats["failed"] + stats["skipped"]
                }
                for name, stats in results.items()
                if not name.startswith("_")  # Skip internal items
            }
        }

        with open(filename, "w") as f:
            json.dump(output, f, indent=2)

        print(f"Test results saved to {filename}")
        return output
```
split: test
package_json, jest_setup, babel_config, other_files, jest_dom_setup: null
problem_id: 2
programming_language: python
original_code:
```python
import streamlit as st

# Create two forms for data entry
# In the first form, the values are stored directly in the form1_dict dictionary
# In the second form, the values are stored in session_state and then copied into form2_dict

form1_dict = {}
with st.form('form1'):
    form1_dict['a'] = st.text_input('a')
    form1_dict['b'] = st.text_input('b')
    st.form_submit_button('Submit Form 1')

st.write(form1_dict)

with st.form('form2'):
    st.text_input('a', key='form2_a')
    st.text_input('b', key='form2_b')
    st.form_submit_button('Submit Form 2')

# Create the form2_dict dictionary and copy the values from session_state into it,
# stripping the 'form2_' prefix from the keys
form2_dict = {}
for key in st.session_state:
    if key.startswith('form2_'):
        form2_dict[key.removeprefix('form2_')] = st.session_state[key]

st.write(form2_dict)
```
highlighted_code: identical to original_code above (the entire file is highlighted).
instruction: add a print at the end so that the form submission result is also shown in the console
test_code:
```python
import inspect
import re
from unittest.mock import patch, MagicMock
import sys
from io import StringIO
import pytest


def test_print_statements_existence(implementation):
    """Test if print statements have been added to the code."""
    impl_name, module = implementation

    # Get the source code of the module
    source_code = inspect.getsource(module)

    # Check if the code contains print statements related to form submissions
    assert 'print(' in source_code, f"{impl_name}: No print statements found in the implementation"

    # Check for form1_dict in print statements
    assert re.search(r'print\(.*form1_dict.*\)', source_code), f"{impl_name}: No print statement for form1_dict found"

    # Check for form2_dict in print statements
    assert re.search(r'print\(.*form2_dict.*\)', source_code), f"{impl_name}: No print statement for form2_dict found"


def test_print_statements_content(implementation):
    """Test if the print statements have appropriate descriptive content."""
    impl_name, module = implementation

    # Get the source code of the module
    source_code = inspect.getsource(module)

    # Look for descriptive print statements rather than just printing the dictionaries
    form1_pattern = r'print\(\s*[\"\'].*[\"\'],\s*form1_dict\s*\)'
    form1_f_pattern = r'print\(\s*f[\"\'].*{form1_dict}.*[\"\']\s*\)'
    has_descriptive_form1 = re.search(form1_pattern, source_code) or re.search(form1_f_pattern, source_code)
    assert has_descriptive_form1, f"{impl_name}: Print statement for form1_dict should include descriptive text"

    form2_pattern = r'print\(\s*[\"\'].*[\"\'],\s*form2_dict\s*\)'
    form2_f_pattern = r'print\(\s*f[\"\'].*{form2_dict}.*[\"\']\s*\)'
    has_descriptive_form2 = re.search(form2_pattern, source_code) or re.search(form2_f_pattern, source_code)
    assert has_descriptive_form2, f"{impl_name}: Print statement for form2_dict should include descriptive text"


def test_print_placement(implementation):
    """Test if print statements are placed in appropriate locations."""
    impl_name, module = implementation

    # Get the source code of the module
    source_code = inspect.getsource(module)

    # Get line numbers of key elements
    form1_dict_print_line = -1
    form2_dict_print_line = -1
    form1_dict_assignment_line = -1
    form2_dict_creation_line = -1

    lines = source_code.split('\n')
    for i, line in enumerate(lines):
        if 'form1_dict = {}' in line:
            form1_dict_assignment_line = i
        elif 'form2_dict = {}' in line:
            form2_dict_creation_line = i
        elif 'print(' in line and 'form1_dict' in line:
            form1_dict_print_line = i
        elif 'print(' in line and 'form2_dict' in line:
            form2_dict_print_line = i

    # Check that print statements are after their respective dictionary operations
    assert form1_dict_print_line > form2_dict_creation_line, \
        f"{impl_name}: form1_dict print statement should be after dictionary initialization"
    assert form2_dict_print_line > form2_dict_creation_line, \
        f"{impl_name}: form2_dict print statement should be after dictionary population"


def test_form_input_with_mocks(implementation):
    """Test the form input functionality using mocks."""
    impl_name, module = implementation

    # Create a controlled test environment with mocks
    with patch.object(module.st, 'form') as mock_form, \
         patch.object(module.st, 'text_input') as mock_text_input, \
         patch.object(module.st, 'form_submit_button') as mock_submit, \
         patch.object(module.st, 'write') as mock_write, \
         patch.object(module.st, 'session_state', {'form2_a': 'test_value_a', 'form2_b': 'test_value_b'}):

        # Set return values for mocks
        mock_form.return_value.__enter__.return_value = MagicMock()
        mock_form.return_value.__exit__.return_value = None
        mock_text_input.return_value = 'test_input'
        mock_submit.return_value = True

        # Capture printed output
        old_stdout = sys.stdout
        captured_output = StringIO()
        sys.stdout = captured_output

        # Execute the main code logic directly
        # We need to manually call the key parts of the module instead of reloading

        # Form 1 handling (extracting this logic from the module)
        form1_dict = {}
        form1_dict['a'] = 'test_input'  # Simulating what the module does with mock returns
        form1_dict['b'] = 'test_input'

        # Form 2 handling (extracting this logic from the module)
        form2_dict = {}
        for key in module.st.session_state:
            if key.startswith('form2_'):
                form2_dict[key.removeprefix('form2_')] = module.st.session_state[key]

        # Restore stdout
        sys.stdout = old_stdout

        # Check only modified versions have print output
        output = captured_output.getvalue()
        assert 'form1_dict' in output.lower() or 'form 1' in output.lower(), \
            f"{impl_name}: form1_dict not in print output"
        assert 'form2_dict' in output.lower() or 'form 2' in output.lower(), \
            f"{impl_name}: form2_dict not in print output"
```
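A hypothetical ending for the Streamlit script that would satisfy these checks: descriptive print calls appended after both dictionaries have been built, so the submission results also reach the console.

```python
# Added at the very end of the script from original_code, after form2_dict is populated:
print("Form 1 submitted:", form1_dict)  # echoed to the console in addition to st.write
print("Form 2 submitted:", form2_dict)
```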
requirements: pytest pytest-mock streamlit
conftest: identical to the conftest shown for problem 1.
test_utils: identical to the test_utils shown for problem 1.
split: test
package_json, jest_setup, babel_config, other_files, jest_dom_setup: null
problem_id: 3
programming_language: python
original_code:
```python
# function to convert string to date
```
highlighted_code: (empty)
instruction: create a sum function from A to B
test_code:
```python
import pytest
import inspect
import types
import sys
import os
import importlib.util
from typing import Any, Callable, List, Tuple, Dict, Union


def test_implementation_exists(implementation):
    """Test that the sum_from_a_to_b function exists in the implementation."""
    impl_name, module = implementation

    # Check for function existence, but don't fail the test if it doesn't exist
    # This allows other tests to be skipped properly
    has_function = hasattr(module, "sum_from_a_to_b")
    if has_function:
        assert callable(module.sum_from_a_to_b), f"{impl_name}'s sum_from_a_to_b is not a function"
    else:
        pytest.skip(f"{impl_name} is missing the sum_from_a_to_b function")


def test_function_signature(implementation):
    """Test that the sum_from_a_to_b function has the correct signature."""
    impl_name, module = implementation

    if not hasattr(module, "sum_from_a_to_b"):
        pytest.skip(f"{impl_name} is missing the sum_from_a_to_b function")

    signature = inspect.signature(module.sum_from_a_to_b)
    assert len(signature.parameters) == 2, f"{impl_name}'s sum_from_a_to_b function should accept 2 parameters"

    # Check parameter names - common conventions would be a/b or start/end
    param_names = list(signature.parameters.keys())
    assert len(param_names) == 2, f"{impl_name}'s sum_from_a_to_b should have exactly 2 parameters"


@pytest.mark.parametrize("a, b, expected", [
    (1, 5, 15),        # Simple positive range
    (5, 10, 45),       # Another positive range
    (0, 0, 0),         # Same number
    (0, 5, 15),        # Start from zero
    (-5, -1, -15),     # Negative range
    (-3, 3, 0),        # Range crossing zero
    (100, 105, 615),   # Larger numbers
])
def test_sum_calculation_basic(implementation, a, b, expected):
    """Test basic calculation of sums from a to b."""
    impl_name, module = implementation

    if not hasattr(module, "sum_from_a_to_b"):
        pytest.skip(f"{impl_name} is missing the sum_from_a_to_b function")

    result = module.sum_from_a_to_b(a, b)
    assert result == expected, f"{impl_name}'s sum_from_a_to_b({a}, {b}) should return {expected}, got {result}"


def test_large_range(implementation):
    """Test with a large range to check efficiency."""
    impl_name, module = implementation

    if not hasattr(module, "sum_from_a_to_b"):
        pytest.skip(f"{impl_name} is missing the sum_from_a_to_b function")

    a, b = 1, 1000
    # Expected sum is n(n+1)/2 where n is the count of numbers
    expected = (b * (b + 1)) // 2 - ((a - 1) * a // 2)

    result = module.sum_from_a_to_b(a, b)
    assert result == expected, f"{impl_name}'s sum_from_a_to_b({a}, {b}) should return {expected}, got {result}"


def test_reversed_parameters(implementation):
    """Test if the function handles cases where a > b."""
    impl_name, module = implementation

    if not hasattr(module, "sum_from_a_to_b"):
        pytest.skip(f"{impl_name} is missing the sum_from_a_to_b function")

    a, b = 10, 5
    expected_if_swapped = sum(range(b, a + 1))

    # Try calling with reversed parameters
    try:
        result = module.sum_from_a_to_b(a, b)
        # Some implementations might return 0 or another value for reversed ranges
        # Try to determine if the implementation swaps parameters or has another strategy
        if result == expected_if_swapped:
            assert True, "Implementation handles reversed parameters by swapping"
        elif result == 0:
            assert True, "Implementation returns 0 for reversed parameters"
        else:
            # If it returns something else, check if it's consistent
            # This could be returning a negative value or some other special handling
            assert result == module.sum_from_a_to_b(a, b), "Implementation is consistent for reversed parameters"
    except Exception as e:
        # If the implementation raises an error, mark the test as skipped
        pytest.skip(f"{impl_name} doesn't handle reversed parameters: {str(e)}")


def test_non_integer_input(implementation):
    """Test if the function properly handles or rejects non-integer inputs."""
    impl_name, module = implementation

    if not hasattr(module, "sum_from_a_to_b"):
        pytest.skip(f"{impl_name} is missing the sum_from_a_to_b function")

    # Test with float inputs that are whole numbers
    try:
        result = module.sum_from_a_to_b(1.0, 5.0)
        # If the function accepts floats, verify the result
        assert result == 15, f"{impl_name}'s sum_from_a_to_b(1.0, 5.0) should return 15, got {result}"
    except (TypeError, ValueError, AssertionError) as e:
        # If the implementation rejects float inputs, that's also valid
        pass

    # Test with string inputs that can be converted to integers
    try:
        result = module.sum_from_a_to_b("1", "5")
        # If it accepts strings, verify the result
        assert result == 15, f"{impl_name}'s sum_from_a_to_b('1', '5') should return 15, got {result}"
    except (TypeError, ValueError, AssertionError) as e:
        # If the implementation rejects string inputs, that's valid
        pass


def test_docstring_presence(implementation):
    """Test that the function has a docstring explaining what it does."""
    impl_name, module = implementation

    if not hasattr(module, "sum_from_a_to_b"):
        pytest.skip(f"{impl_name} is missing the sum_from_a_to_b function")

    # The docstring might be None if not present
    docstring = module.sum_from_a_to_b.__doc__

    # We won't fail the test if docstring is missing, but we'll note it
    if not docstring:
        print(f"Note: {impl_name}'s sum_from_a_to_b function is missing a docstring")
    else:
        assert len(docstring.strip()) > 0, f"{impl_name}'s docstring is empty"


def test_edge_cases(implementation):
    """Test edge cases like very large numbers."""
    impl_name, module = implementation

    if not hasattr(module, "sum_from_a_to_b"):
        pytest.skip(f"{impl_name} is missing the sum_from_a_to_b function")

    # Test with the max argument where sum can still be calculated precisely
    # Using smaller range to avoid very long calculations
    large_a, large_b = 9998, 10000
    expected = sum(range(large_a, large_b + 1))

    result = module.sum_from_a_to_b(large_a, large_b)
    assert result == expected, f"{impl_name}'s sum_from_a_to_b({large_a}, {large_b}) should return {expected}, got {result}"


def test_formula_vs_iteration(implementation):
    """
    Test if the implementation uses the mathematical formula rather than iteration.
    This is a bonus test to check for optimization.
    """
    impl_name, module = implementation

    if not hasattr(module, "sum_from_a_to_b"):
        pytest.skip(f"{impl_name} is missing the sum_from_a_to_b function")

    # For larger ranges, the sum formula n(n+1)/2 is much faster
    a, b = 1, 10000

    # Calculate expected result using the formula
    n = b - a + 1
    expected = (n * (a + b)) // 2

    # Time the function call
    import time
    start_time = time.time()
    result = module.sum_from_a_to_b(a, b)
    execution_time = time.time() - start_time

    assert result == expected, f"{impl_name}'s sum_from_a_to_b({a}, {b}) should return {expected}, got {result}"

    # We won't fail the test based on performance, just report it
    print(f"{impl_name}'s sum_from_a_to_b execution time for range {a} to {b}: {execution_time:.6f} seconds")


def test_performance_threshold(implementation):
    """Test if the implementation is efficient for large inputs."""
    impl_name, module = implementation

    if not hasattr(module, "sum_from_a_to_b"):
        pytest.skip(f"{impl_name} is missing the sum_from_a_to_b function")

    import time

    # Use a moderate-sized range to test performance
    a, b = 1, 100000

    # Calculate expected result using the formula
    n = b - a + 1
    expected = (n * (a + b)) // 2

    # Set a reasonable threshold time (in seconds)
    # Formula-based implementations should be very fast
    THRESHOLD_TIME = 0.1

    start_time = time.time()
    result = module.sum_from_a_to_b(a, b)
    execution_time = time.time() - start_time

    assert result == expected, f"{impl_name}'s sum_from_a_to_b({a}, {b}) returned incorrect result"

    # Note: We're not failing on performance, just reporting
    if execution_time > THRESHOLD_TIME:
        print(f"Note: {impl_name} implementation took {execution_time:.6f}s, which is above the ideal threshold of {THRESHOLD_TIME}s")
    else:
        print(f"{impl_name} implementation is efficient: {execution_time:.6f}s")


def test_type_hints(implementation):
    """Test if the function has proper type hints (Python 3.5+)."""
    impl_name, module = implementation

    if not hasattr(module, "sum_from_a_to_b"):
        pytest.skip(f"{impl_name} is missing the sum_from_a_to_b function")

    # This is a bonus test and won't fail if type hints aren't present
    try:
        annotations = module.sum_from_a_to_b.__annotations__
        if annotations:
            # Check if type hints are present for parameters and return value
            param_names = list(inspect.signature(module.sum_from_a_to_b).parameters.keys())
            for param in param_names:
                if param in annotations:
                    print(f"{impl_name} has type hint for parameter {param}: {annotations[param]}")
            if 'return' in annotations:
                print(f"{impl_name} has return type hint: {annotations['return']}")
    except (AttributeError, TypeError):
        # Older Python versions or implementations without type hints
        pass
```
requirements: pytest pytest-mock
conftest: identical to the conftest shown for problem 1.
test_utils: identical to the test_utils shown for problem 1.
split: test
package_json, jest_setup, babel_config, other_files, jest_dom_setup: null
problem_id: 4
programming_language: python
original_code:
```python
# generate a half adder module of verilog by python

# verilog code
verilog_code = """
module half_adder(a, b, c, sum, carry);
    input a, b;
    output c, sum, carry;

    assign c = a ^ b;
    assign sum = a & b;
    assign carry = a & b;
endmodule
"""

# verilog module name
module_name = "half_adder"

# verilog module input and output ports
input_ports = ["a", "b"]
output_ports = ["c", "sum", "carry"]

# verilog module parameters
parameters = ["WIDTH"]

# verilog module parameters value
parameter_values = ["8"]

# verilog module body
module_body = """
    input a, b;
    output c, sum, carry;

    assign c = a ^ b;
    assign sum = a & b;
    assign carry = a & b;
endmodule
"""

# verilog module instantiation
module_instantiation = """
half_adder half_adder_inst(
    .a(a),
    .b(b),
    .c(c),
    .sum(sum),
    .carry(carry)
);
"""
```
highlighted_code:
```python
# verilog module body
module_body = """
    input a, b;
    output c, sum, carry;

    assign c = a ^ b;
    assign sum = a & b;
    assign carry = a & b;
endmodule
"""
```
instruction: add more input signals
test_code:
```python
import re
import pytest


def test_input_ports_added(implementation):
    """Test that additional input ports have been added to the module_body."""
    impl_name, module = implementation

    # Skip test for implementations without module_body attribute
    if not hasattr(module, 'module_body'):
        pytest.skip(f"{impl_name}: No module_body attribute found")

    # Check if the module_body contains more than just a and b as inputs
    input_pattern = r"input\s+([^;]+);"
    input_matches = re.search(input_pattern, module.module_body)

    if not input_matches:
        pytest.fail(f"{impl_name}: Failed to find input declaration in module_body")

    input_declaration = input_matches.group(1)
    input_signals = [s.strip() for s in input_declaration.split(",")]

    # The original has only "a, b" as inputs
    assert len(input_signals) > 2, f"{impl_name}: Should have more than 2 input signals, but found {len(input_signals)}"

    # Verify the original inputs are still there
    assert "a" in input_signals, f"{impl_name}: Original input 'a' should be preserved"
    assert "b" in input_signals, f"{impl_name}: Original input 'b' should be preserved"

    # Verify new inputs have been added
    new_inputs = [signal for signal in input_signals if signal not in ["a", "b"]]
    assert len(new_inputs) > 0, f"{impl_name}: No new input signals were added"


def test_input_ports_list_updated(implementation):
    """Test that input_ports list has been updated to reflect new inputs."""
    impl_name, module = implementation

    # Skip test for implementations without required attributes
    if not hasattr(module, 'module_body') or not hasattr(module, 'input_ports'):
        pytest.skip(f"{impl_name}: Missing required attributes")

    # Extract input signals from module_body
    input_pattern = r"input\s+([^;]+);"
    input_matches = re.search(input_pattern, module.module_body)

    if not input_matches:
        pytest.fail(f"{impl_name}: Failed to find input declaration in module_body")

    input_declaration = input_matches.group(1)
    module_body_inputs = set(s.strip() for s in input_declaration.split(","))

    # Handle the case where 'c' appears both as input and output
    # The analysis shows implementations may have 'c' as both input and output
    duplicated_ports = set()
    if hasattr(module, 'output_ports'):
        duplicated_ports = module_body_inputs.intersection(set(module.output_ports))

    # Check if module.input_ports list is updated to include all new inputs from module_body
    # (excluding duplicates that are also outputs)
    module_input_ports_set = set(module.input_ports)

    # Get the inputs that are in module_body but not in input_ports list
    missing_inputs = module_body_inputs - module_input_ports_set - duplicated_ports

    # If missing inputs are found, suggest what to add
    if missing_inputs:
        # Expected updated input_ports list
        expected_inputs = sorted(list(module_input_ports_set.union(missing_inputs)))
        message = (f"{impl_name}: input_ports list missing inputs from module_body: {missing_inputs}. "
                   f"Update input_ports to include: {expected_inputs}")
        assert not missing_inputs, message


def test_verilog_code_consistency(implementation):
    """Test that the verilog_code is consistent with module_body for inputs."""
    impl_name, module = implementation

    # Skip test for implementations without required attributes
    if not hasattr(module, 'module_body') or not hasattr(module, 'verilog_code'):
        pytest.skip(f"{impl_name}: Missing required attributes")

    # Check that the original inputs are in verilog_code
    original_inputs = ["a", "b"]
    for input_name in original_inputs:
        # Look for the input name as a word boundary in verilog_code
        pattern = rf"\b{re.escape(input_name)}\b"
        assert re.search(pattern, module.verilog_code), f"{impl_name}: Original input '{input_name}' not found in verilog_code"


def test_module_instantiation_updated(implementation):
    """Test that module_instantiation has been updated to include new inputs."""
    impl_name, module = implementation

    # Skip test for implementations without required attributes
    if not hasattr(module, 'module_body') or not hasattr(module, 'module_instantiation'):
        pytest.skip(f"{impl_name}: Missing required attributes")

    # Extract input signals from module_body
    input_pattern = r"input\s+([^;]+);"
    input_matches = re.search(input_pattern, module.module_body)

    if not input_matches:
        pytest.fail(f"{impl_name}: Failed to find input declaration in module_body")

    # Just check that the original inputs are in the module_instantiation
    original_inputs = ["a", "b"]
    for input_name in original_inputs:
        # Check if the input is connected in the instantiation
        pattern = rf"\.{re.escape(input_name)}\s*\("
        assert re.search(pattern, module.module_instantiation), f"{impl_name}: Original input '{input_name}' not found in module_instantiation"


def test_logic_updated_for_new_inputs(implementation):
    """Test that the logic in the module has been updated to use the new inputs."""
    impl_name, module = implementation

    # Skip test for implementations without module_body attribute
    if not hasattr(module, 'module_body'):
        pytest.skip(f"{impl_name}: No module_body attribute found")

    # Extract input signals from module_body
    input_pattern = r"input\s+([^;]+);"
    input_matches = re.search(input_pattern, module.module_body)

    if not input_matches:
        pytest.fail(f"{impl_name}: Failed to find input declaration in module_body")

    input_declaration = input_matches.group(1)
    input_signals = [s.strip() for s in input_declaration.split(",")]

    # Original inputs
    original_inputs = ["a", "b"]
    new_inputs = [signal for signal in input_signals if signal not in original_inputs]

    if not new_inputs:
        pytest.skip(f"{impl_name}: No new input signals were found to test in logic")

    # Look for any usage of new inputs in the module body
    # Extract the logic section (everything after the port declarations)
    module_content = module.module_body

    # Remove the input and output declaration lines
    input_output_pattern = r"(input|output)\s+[^;]+;"
    logic_section = re.sub(input_output_pattern, "", module_content)

    # Check if any new input is used in the logic section
    used_inputs = set()
    for new_input in new_inputs:
        # Check if the new input appears as a word boundary in the logic section
        if re.search(rf'\b{re.escape(new_input)}\b', logic_section):
            used_inputs.add(new_input)

    # If no inputs are used, provide information about the implementation
    if not used_inputs:
        # Extract assign statements for better error messages
        assign_pattern = r"assign\s+(\w+)\s*=\s*([^;]+);"
        assigns = list(re.finditer(assign_pattern, module.module_body))

        if not assigns:
            pytest.skip(f"{impl_name}: No assign statements found to test for input usage")
        else:
            # Extract the right-hand side of assign statements
            assign_exprs = [assign.group(2) for assign in assigns]

            # Suggest how to update logic to use new inputs
            suggested_logic = []
            for i, expr in enumerate(assign_exprs):
                if i == 0:  # c
                    suggested_logic.append(f"{expr} ^ {' ^ '.join(new_inputs[:2])}")
                elif i == 1:  # sum
                    suggested_logic.append(f"({expr}) | ({' & '.join(new_inputs[:2])})")
                elif i == 2:  # carry
                    suggested_logic.append(f"{expr} & {' & '.join(new_inputs[:2])}")

            fail_msg = (f"{impl_name}: None of the new inputs ({new_inputs}) are used in the logic. "
                        f"Found assigns: {assign_exprs}. "
                        f"Consider updating to: {suggested_logic}")
            assert used_inputs, fail_msg


def test_no_invalid_input_names(implementation):
    """Test that there are no invalid input names."""
    impl_name, module = implementation

    # Skip test for implementations without module_body attribute
    if not hasattr(module, 'module_body'):
        pytest.skip(f"{impl_name}: No module_body attribute found")

    # Extract input signals from module_body
    input_pattern = r"input\s+([^;]+);"
    input_matches = re.search(input_pattern, module.module_body)

    if not input_matches:
        pytest.fail(f"{impl_name}: Failed to find input declaration in module_body")

    input_declaration = input_matches.group(1)
    input_signals = [s.strip() for s in input_declaration.split(",")]

    # Check for duplicates in input list
    input_set = set()
    duplicates = set()
    for signal in input_signals:
        if signal in input_set:
            duplicates.add(signal)
        input_set.add(signal)

    # Allow 'c' to be duplicated as it could be both input and output in these examples
    allowed_duplicates = {'c'}
    real_duplicates = duplicates - allowed_duplicates

    assert not real_duplicates, f"{impl_name}: Duplicate input signals found: {real_duplicates}"

    # Check for invalid Verilog identifiers
    invalid_identifiers = []
    for signal in input_signals:
        # Verilog identifiers can only contain letters, numbers, underscore and $
        # Must start with a letter or underscore
        if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_$]*$', signal):
            invalid_identifiers.append(signal)

    assert not invalid_identifiers, f"{impl_name}: Invalid Verilog identifiers found: {invalid_identifiers}"


def test_required_attributes_exist(implementation):
    """Test that all required attributes exist in the implementation."""
    impl_name, module = implementation

    # Required attributes for a complete implementation
    required_attributes = [
        'module_body',
        'verilog_code',
        'module_instantiation',
        'input_ports',
        'output_ports'
    ]

    # For new_code2, we should check if the module has any attributes at all
    # before reporting all missing attributes
    if not any(hasattr(module, attr) for attr in required_attributes):
        pytest.skip(f"{impl_name}: Implementation appears incomplete, no required attributes found")

    missing_attributes = []
    for attr in required_attributes:
        if not hasattr(module, attr):
            missing_attributes.append(attr)

    assert not missing_attributes, f"{impl_name}: Missing required attributes: {missing_attributes}"
```
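A hypothetical sketch of the kind of update these tests look for: extra inputs declared in module_body, mirrored in input_ports, and actually used in the assign logic. The signal names cin and en are illustrative only.

```python
# Hypothetical edit to the generator's data: two extra inputs (cin, en), kept in
# input_ports and folded into the assign statements so the tests see them used.
input_ports = ["a", "b", "cin", "en"]

module_body = """
    input a, b, cin, en;
    output c, sum, carry;

    assign c = (a ^ b ^ cin) & en;
    assign sum = (a & b) | (b & cin) | (a & cin);
    assign carry = a & b & en;
endmodule
"""
```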
pytest pytest-mock
import pytest import os import sys import json from typing import Dict, List, Optional, Any # Import from local test_utils.py in the same directory from test_utils import TestUtils, TestResultsManager # Load all implementations in the current sandbox implementations = TestUtils.load_all_implementations() test_results = TestResultsManager() @pytest.fixture(scope="session") def sandbox_dir(): """Fixture to provide the sandbox directory path.""" return os.path.dirname(os.path.abspath(__file__)) @pytest.fixture(scope="session") def sandbox_name(): """Fixture to provide the sandbox name.""" return os.path.basename(os.path.dirname(os.path.abspath(__file__))) @pytest.fixture(scope="session") def all_implementations(): """Fixture to provide all implementations as a dictionary.""" return implementations @pytest.fixture(params=list(implementations.items())) def implementation(request): """Fixture to provide each implementation to tests one at a time.""" return request.param @pytest.fixture(scope="session") def results_manager(): """Fixture to provide access to the test results manager.""" return test_results # Hook for collecting test results @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): """Pytest hook to collect test results.""" # Execute all other hooks to obtain the report object outcome = yield rep = outcome.get_result() # We're only interested in the call outcome if rep.when == "call": if hasattr(item, "callspec") and "implementation" in item.callspec.params: # Get implementation name and module impl_name, _ = item.callspec.params["implementation"] # Get test name test_name = item.nodeid.split("::")[-1] # Record result if rep.passed: test_results.record_result(impl_name, test_name, True) elif rep.failed: error_msg = str(rep.longrepr) if rep.longrepr else "Test failed" test_results.record_result(impl_name, test_name, False, error_msg) elif rep.skipped: skip_reason = rep.longrepr[2] if rep.longrepr else "Test skipped" test_results.record_skip(impl_name, test_name, skip_reason) # Hook to save results at the end of testing @pytest.hookimpl(trylast=True) def pytest_sessionfinish(session, exitstatus): """Save test results at the end of the test session.""" test_results.save_results()
import os import sys import glob import re import importlib.util import traceback import types from typing import Dict, List, Optional, Any, Tuple class TestUtils: @staticmethod def discover_implementation_files(directory: str = None) -> List[str]: """Find all implementation files in the current sandbox directory.""" if directory is None: directory = os.path.dirname(os.path.abspath(__file__)) patterns = [ r'modified_code\d+\.py', r'new_code\d+\.py', r'original_code\.py', r'implementation\d*\.py' ] pattern = re.compile('|'.join(f'({p})' for p in patterns)) implementations = [] for file_path in glob.glob(os.path.join(directory, '*.py')): if pattern.search(os.path.basename(file_path)): implementations.append(file_path) # Sort files numerically def sort_key(path): filename = os.path.basename(path) match = re.search(r'(\d+)', filename) return int(match.group(1)) if match else 0 return sorted(implementations, key=sort_key) @staticmethod def create_mock_module(file_path: str, module_name: str, error_info: str) -> types.ModuleType: """Create a mock module that contains error information but can still be tested.""" # Create a new module object mock_module = types.ModuleType(module_name) # Add basic attributes mock_module.__file__ = file_path mock_module.__name__ = module_name mock_module.__display_name__ = module_name mock_module.__error__ = error_info # Add a dummy function that can be detected by test functions def dummy_function(*args, **kwargs): return f"Error in module: {error_info}" setattr(mock_module, "implementation_error", dummy_function) return mock_module @staticmethod def load_module(file_path: str, module_name: Optional[str] = None) -> Any: """ Safely load a module from a file path with proper error handling. If the module has errors, return a mock module that can still be tested. 
""" if module_name is None: module_name = os.path.basename(file_path).replace('.py', '') # Create a unique module name to avoid conflicts sandbox_id = os.path.basename(os.path.dirname(file_path)) unique_module_name = f"{sandbox_id}_{module_name}" try: # First, try to read the file to check for syntax errors with open(file_path, 'r') as f: source_code = f.read() # Check for syntax errors by compiling the code try: compiled = compile(source_code, file_path, 'exec') except SyntaxError as e: error_msg = f"Syntax error: {str(e)}" print(f"Syntax error in {file_path}: {e}") print(f" Line {e.lineno}, column {e.offset}: {e.text}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) # Create the module spec spec = importlib.util.spec_from_file_location(unique_module_name, file_path) if spec is None or spec.loader is None: error_msg = f"Could not create spec for {file_path}" print(f"Error: {error_msg}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) # Create the module object module = importlib.util.module_from_spec(spec) sys.modules[unique_module_name] = module # Special handling for execution errors try: # Execute the module code in a safe way spec.loader.exec_module(module) # Store the original name for reference module.__display_name__ = module_name return module except Exception as e: error_msg = f"Runtime error: {str(e)}" traceback_str = traceback.format_exc() print(f"Error executing module {file_path}: {e}") print(traceback_str) # Create a partial module that contains what we loaded before the error mock_module = TestUtils.create_mock_module(file_path, unique_module_name, error_msg) # Copy any attributes that might have been defined before the error for attr_name in dir(module): if not attr_name.startswith('__'): try: setattr(mock_module, attr_name, getattr(module, attr_name)) except Exception: pass # Skip attributes that can't be copied return mock_module except FileNotFoundError as e: error_msg = f"File not found: {str(e)}" print(f"Error: {error_msg}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) except Exception as e: error_msg = f"Unexpected error: {str(e)}" print(f"Error loading module {file_path}: {e}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) @classmethod def load_all_implementations(cls, directory: str = None) -> Dict[str, Any]: """Load all implementation files in the directory, including those with errors.""" if directory is None: directory = os.path.dirname(os.path.abspath(__file__)) implementations = {} implementation_files = cls.discover_implementation_files(directory) if not implementation_files: print("WARNING: No implementation files found. 
Check your file naming patterns.") for file_path in implementation_files: module_name = os.path.basename(file_path).replace('.py', '') module = cls.load_module(file_path, module_name) # Always add the module, even if it has errors implementations[module_name] = module if hasattr(module, '__error__'): print(f"Loaded with errors: {module_name} - {module.__error__}") else: print(f"Successfully loaded: {module_name}") return implementations class TestResultsManager: def __init__(self): self.results = {} self.sandbox_name = os.path.basename(os.path.dirname(os.path.abspath(__file__))) def record_result(self, impl_name: str, test_name: str, passed: bool, error_msg: Optional[str] = None) -> None: """Record a test result for an implementation.""" if impl_name not in self.results: self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []} if passed: self.results[impl_name]["passed"] += 1 else: self.results[impl_name]["failed"] += 1 if error_msg: self.results[impl_name]["errors"].append({ "test": test_name, "error": error_msg }) def record_skip(self, impl_name: str, test_name: str, reason: Optional[str] = None) -> None: """Record a skipped test for an implementation.""" if impl_name not in self.results: self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []} self.results[impl_name]["skipped"] += 1 if reason: self.results[impl_name]["errors"].append({ "test": test_name, "error": f"SKIPPED: {reason}" }) def get_winner(self) -> Tuple[Optional[int], Dict]: """Determine the winner based on test results.""" winner = None max_passed = -1 for impl_name, results in self.results.items(): if impl_name == "original_code": continue # Skip original code when determining winner if results["passed"] > max_passed: max_passed = results["passed"] winner = impl_name # Break ties by looking at failure count elif results["passed"] == max_passed and winner is not None: if results["failed"] < self.results[winner]["failed"]: winner = impl_name # Convert winner to numeric index if possible winner_index = -1 if winner and re.match(r'modified_code\d+', winner): try: winner_index = int(re.search(r'(\d+)', winner).group(1)) except (AttributeError, ValueError): pass return winner_index, self.results def save_results(self, filename: str = "test_results.json") -> None: """Save test results to a JSON file.""" import json winner_index, results = self.get_winner() # Check if all tests were skipped all_skipped = all( stats["skipped"] == stats["passed"] + stats["failed"] + stats["skipped"] for impl_name, stats in results.items() if impl_name != "original_code" ) output = { "winner": winner_index, "all_skipped": all_skipped, "results": { name: { "passed": stats["passed"], "failed": stats["failed"], "skipped": stats["skipped"], "total": stats["passed"] + stats["failed"] + stats["skipped"] } for name, stats in results.items() if not name.startswith("_") # Skip internal items } } with open(filename, "w") as f: json.dump(output, f, indent=2) print(f"Test results saved to {filename}") return output
test
null
null
null
null
null
5
python
def is_prime(n):
def is_prime(n):
add a function to check for primes
# test_is_prime.py import pytest import inspect import random def test_is_prime_exists(implementation): """Test that the is_prime function exists and is callable.""" impl_name, module = implementation if not hasattr(module, "is_prime"): pytest.skip(f"{impl_name} has no is_prime function") assert callable(module.is_prime), f"{impl_name}: is_prime should be callable" def test_is_prime_signature(implementation): """Test that is_prime takes exactly one parameter.""" impl_name, module = implementation if not hasattr(module, "is_prime"): pytest.skip(f"{impl_name} has no is_prime function") sig = inspect.signature(module.is_prime) assert len(sig.parameters) == 1, f"{impl_name}: is_prime should take exactly one argument" @pytest.mark.parametrize("n,expected", [ # small primes (2, True), (3, True), (5, True), (7, True), (11, True), # small non‑primes (0, False), (1, False), (4, False), (6, False), (9, False), # negatives (-1, False), (-2, False), (-17, False), ]) def test_is_prime_basic_cases(implementation, n, expected): """Basic known primes, non‑primes, and negatives.""" _, module = implementation if not hasattr(module, "is_prime"): pytest.skip("no is_prime") assert module.is_prime(n) is expected, f"is_prime({n}) should be {expected}" def naive_is_prime(n): """Reference implementation.""" if n <= 1: return False if n <= 3: return True if n % 2 == 0 or n % 3 == 0: return False i = 5 while i * i <= n: if n % i == 0 or n % (i + 2) == 0: return False i += 6 return True def test_is_prime_random(implementation): """Cross‑check is_prime against a simple naive algorithm on random inputs.""" _, module = implementation if not hasattr(module, "is_prime"): pytest.skip("no is_prime") random.seed(0) for n in random.sample(range(0, 200), 30): assert module.is_prime(n) == naive_is_prime(n), f"Mismatch on {n}"
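A minimal sketch of an is_prime implementation that the tests above would accept (it mirrors the 6k ± 1 trial-division reference used in the test file); this is illustrative, not the dataset's reference solution.

def is_prime(n):
    """Return True if n is a prime number, False otherwise."""
    if n <= 1:          # 0, 1 and negatives are not prime
        return False
    if n <= 3:          # 2 and 3 are prime
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:   # only test divisors of the form 6k ± 1
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True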
pytest pytest-mock
import pytest import os import sys import json from typing import Dict, List, Optional, Any # Import from local test_utils.py in the same directory from test_utils import TestUtils, TestResultsManager # Load all implementations in the current sandbox implementations = TestUtils.load_all_implementations() test_results = TestResultsManager() @pytest.fixture(scope="session") def sandbox_dir(): """Fixture to provide the sandbox directory path.""" return os.path.dirname(os.path.abspath(__file__)) @pytest.fixture(scope="session") def sandbox_name(): """Fixture to provide the sandbox name.""" return os.path.basename(os.path.dirname(os.path.abspath(__file__))) @pytest.fixture(scope="session") def all_implementations(): """Fixture to provide all implementations as a dictionary.""" return implementations @pytest.fixture(params=list(implementations.items())) def implementation(request): """Fixture to provide each implementation to tests one at a time.""" return request.param @pytest.fixture(scope="session") def results_manager(): """Fixture to provide access to the test results manager.""" return test_results # Hook for collecting test results @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): """Pytest hook to collect test results.""" # Execute all other hooks to obtain the report object outcome = yield rep = outcome.get_result() # We're only interested in the call outcome if rep.when == "call": if hasattr(item, "callspec") and "implementation" in item.callspec.params: # Get implementation name and module impl_name, _ = item.callspec.params["implementation"] # Get test name test_name = item.nodeid.split("::")[-1] # Record result if rep.passed: test_results.record_result(impl_name, test_name, True) elif rep.failed: error_msg = str(rep.longrepr) if rep.longrepr else "Test failed" test_results.record_result(impl_name, test_name, False, error_msg) elif rep.skipped: skip_reason = rep.longrepr[2] if rep.longrepr else "Test skipped" test_results.record_skip(impl_name, test_name, skip_reason) # Hook to save results at the end of testing @pytest.hookimpl(trylast=True) def pytest_sessionfinish(session, exitstatus): """Save test results at the end of the test session.""" test_results.save_results()
import os import sys import glob import re import importlib.util import traceback import types from typing import Dict, List, Optional, Any, Tuple class TestUtils: @staticmethod def discover_implementation_files(directory: str = None) -> List[str]: """Find all implementation files in the current sandbox directory.""" if directory is None: directory = os.path.dirname(os.path.abspath(__file__)) patterns = [ r'modified_code\d+\.py', r'new_code\d+\.py', r'original_code\.py', r'implementation\d*\.py' ] pattern = re.compile('|'.join(f'({p})' for p in patterns)) implementations = [] for file_path in glob.glob(os.path.join(directory, '*.py')): if pattern.search(os.path.basename(file_path)): implementations.append(file_path) # Sort files numerically def sort_key(path): filename = os.path.basename(path) match = re.search(r'(\d+)', filename) return int(match.group(1)) if match else 0 return sorted(implementations, key=sort_key) @staticmethod def create_mock_module(file_path: str, module_name: str, error_info: str) -> types.ModuleType: """Create a mock module that contains error information but can still be tested.""" # Create a new module object mock_module = types.ModuleType(module_name) # Add basic attributes mock_module.__file__ = file_path mock_module.__name__ = module_name mock_module.__display_name__ = module_name mock_module.__error__ = error_info # Add a dummy function that can be detected by test functions def dummy_function(*args, **kwargs): return f"Error in module: {error_info}" setattr(mock_module, "implementation_error", dummy_function) return mock_module @staticmethod def load_module(file_path: str, module_name: Optional[str] = None) -> Any: """ Safely load a module from a file path with proper error handling. If the module has errors, return a mock module that can still be tested. 
""" if module_name is None: module_name = os.path.basename(file_path).replace('.py', '') # Create a unique module name to avoid conflicts sandbox_id = os.path.basename(os.path.dirname(file_path)) unique_module_name = f"{sandbox_id}_{module_name}" try: # First, try to read the file to check for syntax errors with open(file_path, 'r') as f: source_code = f.read() # Check for syntax errors by compiling the code try: compiled = compile(source_code, file_path, 'exec') except SyntaxError as e: error_msg = f"Syntax error: {str(e)}" print(f"Syntax error in {file_path}: {e}") print(f" Line {e.lineno}, column {e.offset}: {e.text}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) # Create the module spec spec = importlib.util.spec_from_file_location(unique_module_name, file_path) if spec is None or spec.loader is None: error_msg = f"Could not create spec for {file_path}" print(f"Error: {error_msg}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) # Create the module object module = importlib.util.module_from_spec(spec) sys.modules[unique_module_name] = module # Special handling for execution errors try: # Execute the module code in a safe way spec.loader.exec_module(module) # Store the original name for reference module.__display_name__ = module_name return module except Exception as e: error_msg = f"Runtime error: {str(e)}" traceback_str = traceback.format_exc() print(f"Error executing module {file_path}: {e}") print(traceback_str) # Create a partial module that contains what we loaded before the error mock_module = TestUtils.create_mock_module(file_path, unique_module_name, error_msg) # Copy any attributes that might have been defined before the error for attr_name in dir(module): if not attr_name.startswith('__'): try: setattr(mock_module, attr_name, getattr(module, attr_name)) except Exception: pass # Skip attributes that can't be copied return mock_module except FileNotFoundError as e: error_msg = f"File not found: {str(e)}" print(f"Error: {error_msg}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) except Exception as e: error_msg = f"Unexpected error: {str(e)}" print(f"Error loading module {file_path}: {e}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) @classmethod def load_all_implementations(cls, directory: str = None) -> Dict[str, Any]: """Load all implementation files in the directory, including those with errors.""" if directory is None: directory = os.path.dirname(os.path.abspath(__file__)) implementations = {} implementation_files = cls.discover_implementation_files(directory) if not implementation_files: print("WARNING: No implementation files found. 
Check your file naming patterns.") for file_path in implementation_files: module_name = os.path.basename(file_path).replace('.py', '') module = cls.load_module(file_path, module_name) # Always add the module, even if it has errors implementations[module_name] = module if hasattr(module, '__error__'): print(f"Loaded with errors: {module_name} - {module.__error__}") else: print(f"Successfully loaded: {module_name}") return implementations class TestResultsManager: def __init__(self): self.results = {} self.sandbox_name = os.path.basename(os.path.dirname(os.path.abspath(__file__))) def record_result(self, impl_name: str, test_name: str, passed: bool, error_msg: Optional[str] = None) -> None: """Record a test result for an implementation.""" if impl_name not in self.results: self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []} if passed: self.results[impl_name]["passed"] += 1 else: self.results[impl_name]["failed"] += 1 if error_msg: self.results[impl_name]["errors"].append({ "test": test_name, "error": error_msg }) def record_skip(self, impl_name: str, test_name: str, reason: Optional[str] = None) -> None: """Record a skipped test for an implementation.""" if impl_name not in self.results: self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []} self.results[impl_name]["skipped"] += 1 if reason: self.results[impl_name]["errors"].append({ "test": test_name, "error": f"SKIPPED: {reason}" }) def get_winner(self) -> Tuple[Optional[int], Dict]: """Determine the winner based on test results.""" winner = None max_passed = -1 for impl_name, results in self.results.items(): if impl_name == "original_code": continue # Skip original code when determining winner if results["passed"] > max_passed: max_passed = results["passed"] winner = impl_name # Break ties by looking at failure count elif results["passed"] == max_passed and winner is not None: if results["failed"] < self.results[winner]["failed"]: winner = impl_name # Convert winner to numeric index if possible winner_index = -1 if winner and re.match(r'modified_code\d+', winner): try: winner_index = int(re.search(r'(\d+)', winner).group(1)) except (AttributeError, ValueError): pass return winner_index, self.results def save_results(self, filename: str = "test_results.json") -> None: """Save test results to a JSON file.""" import json winner_index, results = self.get_winner() # Check if all tests were skipped all_skipped = all( stats["skipped"] == stats["passed"] + stats["failed"] + stats["skipped"] for impl_name, stats in results.items() if impl_name != "original_code" ) output = { "winner": winner_index, "all_skipped": all_skipped, "results": { name: { "passed": stats["passed"], "failed": stats["failed"], "skipped": stats["skipped"], "total": stats["passed"] + stats["failed"] + stats["skipped"] } for name, stats in results.items() if not name.startswith("_") # Skip internal items } } with open(filename, "w") as f: json.dump(output, f, indent=2) print(f"Test results saved to {filename}") return output
test
null
null
null
null
null
6
python
create a flask app that shows the current date and time
import pytest import re import sys import importlib from flask.testing import FlaskClient from datetime import datetime, timedelta from unittest.mock import patch, MagicMock from importlib import util from contextlib import contextmanager @contextmanager def import_module_from_path(module_path): """Context manager to import a module from a path and then remove it from sys.modules.""" name = f"temp_module_{hash(module_path)}" spec = util.spec_from_file_location(name, module_path) module = util.module_from_spec(spec) sys.modules[name] = module spec.loader.exec_module(module) try: yield module finally: if name in sys.modules: del sys.modules[name] def test_module_imports(implementation): """Test if implementation imports the necessary modules.""" impl_name, module = implementation # Skip original_code tests as it's known to be missing implementations if impl_name == "original_code": pytest.skip( "Skipping original_code as it's known to be missing implementations" ) # Check if Flask is imported assert hasattr(module, "Flask"), f"{impl_name} should import Flask from flask" # Check if datetime is imported assert "datetime" in dir(module) or hasattr( module, "datetime" ), f"{impl_name} should import datetime" def test_app_creation(implementation): """Test if implementation creates a Flask app.""" impl_name, module = implementation assert hasattr(module, "app"), f"{impl_name} should create a Flask app instance" assert isinstance( module.app, module.Flask ), f"{impl_name} should create a Flask app instance" def test_route_definition(implementation): """Test if implementation defines a route for the root URL.""" impl_name, module = implementation # Get the URL map from the app url_map = module.app.url_map # Check if the root URL is in the map root_route_exists = any(rule.rule == "/" for rule in url_map.iter_rules()) assert ( root_route_exists ), f"{impl_name} should define a route for the root URL ('/')" def test_datetime_display(implementation): """Test if implementation displays the current date and time.""" impl_name, module = implementation # Create a test client client = module.app.test_client() # Set a fixed datetime for testing fixed_datetime = datetime(2023, 1, 1, 12, 0, 0) formatted_time = fixed_datetime.strftime("%Y-%m-%d %H:%M:%S") # The key issue: We need to patch the datetime module within the implementation module # Get module name for patching module_name = module.__name__ # Patch datetime in the implementation module patch_path = f"{module_name}.datetime" with patch(patch_path) as mock_datetime: # Configure the mock mock_now = MagicMock() mock_now.return_value = fixed_datetime mock_datetime.now = mock_now # Make a request to the root URL response = client.get("/") # Check if the response contains the expected date and time assert ( response.status_code == 200 ), f"{impl_name} should return a 200 status code" # Convert the response data to string if it's bytes response_text = ( response.data.decode("utf-8") if isinstance(response.data, bytes) else response.data ) # Check if the formatted time is in the response assert formatted_time in response_text, ( f"{impl_name} should display the current date and time: " f"Expected '{formatted_time}' in '{response_text}'" ) def test_app_functionality_with_client(implementation): """Test full app functionality using test client.""" impl_name, module = implementation # Create a test client client = module.app.test_client() # Make a request to the root URL response = client.get("/") # Check if the response contains any date-time format assert 
response.status_code == 200, f"{impl_name} should return a 200 status code" response_text = response.data.decode("utf-8") # Look for date-time patterns (YYYY-MM-DD HH:MM:SS) datetime_pattern = r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}" assert re.search( datetime_pattern, response_text ), f"{impl_name} should display date and time in a standard format" def test_time_accuracy(implementation): """Test if the displayed time is accurate within the implementation.""" impl_name, module = implementation # Create a test client client = module.app.test_client() # Set a fixed datetime for testing fixed_time = datetime(2023, 1, 1, 12, 0, 0) # Patch datetime.now in the implementation module module_name = module.__name__ with patch(f"{module_name}.datetime") as mock_datetime: # Configure the mock to return our fixed time mock_now = MagicMock() mock_now.return_value = fixed_time mock_datetime.now = mock_now mock_datetime.strptime = datetime.strptime # Make a request to the root URL response = client.get("/") # Check status code assert response.status_code == 200 # Convert response to text response_text = response.data.decode("utf-8") # Check if the response contains our fixed time formatted_time = fixed_time.strftime("%Y-%m-%d %H:%M:%S") assert ( formatted_time in response_text ), f"{impl_name} should display the specified time: {formatted_time}"
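A hedged sketch of a Flask app these tests appear to target: a single route on "/" that renders datetime.now() in the YYYY-MM-DD HH:MM:SS format the assertions search for. The route function name and response wording are assumptions.

from datetime import datetime
from flask import Flask

app = Flask(__name__)

@app.route("/")
def show_time():
    # Format matches the pattern the tests look for: %Y-%m-%d %H:%M:%S
    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return f"Current date and time: {now}"

if __name__ == "__main__":
    app.run(debug=True)

Importing datetime and Flask at module level matters here, because the tests patch module.datetime and check for a module-level Flask name and app instance.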
flask pytest pytest-mock
import pytest import os import sys import json from typing import Dict, List, Optional, Any # Import from local test_utils.py in the same directory from test_utils import TestUtils, TestResultsManager # Load all implementations in the current sandbox implementations = TestUtils.load_all_implementations() test_results = TestResultsManager() @pytest.fixture(scope="session") def sandbox_dir(): """Fixture to provide the sandbox directory path.""" return os.path.dirname(os.path.abspath(__file__)) @pytest.fixture(scope="session") def sandbox_name(): """Fixture to provide the sandbox name.""" return os.path.basename(os.path.dirname(os.path.abspath(__file__))) @pytest.fixture(scope="session") def all_implementations(): """Fixture to provide all implementations as a dictionary.""" return implementations @pytest.fixture(params=list(implementations.items())) def implementation(request): """Fixture to provide each implementation to tests one at a time.""" return request.param @pytest.fixture(scope="session") def results_manager(): """Fixture to provide access to the test results manager.""" return test_results # Hook for collecting test results @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): """Pytest hook to collect test results.""" # Execute all other hooks to obtain the report object outcome = yield rep = outcome.get_result() # We're only interested in the call outcome if rep.when == "call": if hasattr(item, "callspec") and "implementation" in item.callspec.params: # Get implementation name and module impl_name, _ = item.callspec.params["implementation"] # Get test name test_name = item.nodeid.split("::")[-1] # Record result if rep.passed: test_results.record_result(impl_name, test_name, True) elif rep.failed: error_msg = str(rep.longrepr) if rep.longrepr else "Test failed" test_results.record_result(impl_name, test_name, False, error_msg) elif rep.skipped: skip_reason = rep.longrepr[2] if rep.longrepr else "Test skipped" test_results.record_skip(impl_name, test_name, skip_reason) # Hook to save results at the end of testing @pytest.hookimpl(trylast=True) def pytest_sessionfinish(session, exitstatus): """Save test results at the end of the test session.""" test_results.save_results()
import os import sys import glob import re import importlib.util import traceback import types from typing import Dict, List, Optional, Any, Tuple class TestUtils: @staticmethod def discover_implementation_files(directory: str = None) -> List[str]: """Find all implementation files in the current sandbox directory.""" if directory is None: directory = os.path.dirname(os.path.abspath(__file__)) patterns = [ r'modified_code\d+\.py', r'new_code\d+\.py', r'original_code\.py', r'implementation\d*\.py' ] pattern = re.compile('|'.join(f'({p})' for p in patterns)) implementations = [] for file_path in glob.glob(os.path.join(directory, '*.py')): if pattern.search(os.path.basename(file_path)): implementations.append(file_path) # Sort files numerically def sort_key(path): filename = os.path.basename(path) match = re.search(r'(\d+)', filename) return int(match.group(1)) if match else 0 return sorted(implementations, key=sort_key) @staticmethod def create_mock_module(file_path: str, module_name: str, error_info: str) -> types.ModuleType: """Create a mock module that contains error information but can still be tested.""" # Create a new module object mock_module = types.ModuleType(module_name) # Add basic attributes mock_module.__file__ = file_path mock_module.__name__ = module_name mock_module.__display_name__ = module_name mock_module.__error__ = error_info # Add a dummy function that can be detected by test functions def dummy_function(*args, **kwargs): return f"Error in module: {error_info}" setattr(mock_module, "implementation_error", dummy_function) return mock_module @staticmethod def load_module(file_path: str, module_name: Optional[str] = None) -> Any: """ Safely load a module from a file path with proper error handling. If the module has errors, return a mock module that can still be tested. 
""" if module_name is None: module_name = os.path.basename(file_path).replace('.py', '') # Create a unique module name to avoid conflicts sandbox_id = os.path.basename(os.path.dirname(file_path)) unique_module_name = f"{sandbox_id}_{module_name}" try: # First, try to read the file to check for syntax errors with open(file_path, 'r') as f: source_code = f.read() # Check for syntax errors by compiling the code try: compiled = compile(source_code, file_path, 'exec') except SyntaxError as e: error_msg = f"Syntax error: {str(e)}" print(f"Syntax error in {file_path}: {e}") print(f" Line {e.lineno}, column {e.offset}: {e.text}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) # Create the module spec spec = importlib.util.spec_from_file_location(unique_module_name, file_path) if spec is None or spec.loader is None: error_msg = f"Could not create spec for {file_path}" print(f"Error: {error_msg}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) # Create the module object module = importlib.util.module_from_spec(spec) sys.modules[unique_module_name] = module # Special handling for execution errors try: # Execute the module code in a safe way spec.loader.exec_module(module) # Store the original name for reference module.__display_name__ = module_name return module except Exception as e: error_msg = f"Runtime error: {str(e)}" traceback_str = traceback.format_exc() print(f"Error executing module {file_path}: {e}") print(traceback_str) # Create a partial module that contains what we loaded before the error mock_module = TestUtils.create_mock_module(file_path, unique_module_name, error_msg) # Copy any attributes that might have been defined before the error for attr_name in dir(module): if not attr_name.startswith('__'): try: setattr(mock_module, attr_name, getattr(module, attr_name)) except Exception: pass # Skip attributes that can't be copied return mock_module except FileNotFoundError as e: error_msg = f"File not found: {str(e)}" print(f"Error: {error_msg}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) except Exception as e: error_msg = f"Unexpected error: {str(e)}" print(f"Error loading module {file_path}: {e}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) @classmethod def load_all_implementations(cls, directory: str = None) -> Dict[str, Any]: """Load all implementation files in the directory, including those with errors.""" if directory is None: directory = os.path.dirname(os.path.abspath(__file__)) implementations = {} implementation_files = cls.discover_implementation_files(directory) if not implementation_files: print("WARNING: No implementation files found. 
Check your file naming patterns.") for file_path in implementation_files: module_name = os.path.basename(file_path).replace('.py', '') module = cls.load_module(file_path, module_name) # Always add the module, even if it has errors implementations[module_name] = module if hasattr(module, '__error__'): print(f"Loaded with errors: {module_name} - {module.__error__}") else: print(f"Successfully loaded: {module_name}") return implementations class TestResultsManager: def __init__(self): self.results = {} self.sandbox_name = os.path.basename(os.path.dirname(os.path.abspath(__file__))) def record_result(self, impl_name: str, test_name: str, passed: bool, error_msg: Optional[str] = None) -> None: """Record a test result for an implementation.""" if impl_name not in self.results: self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []} if passed: self.results[impl_name]["passed"] += 1 else: self.results[impl_name]["failed"] += 1 if error_msg: self.results[impl_name]["errors"].append({ "test": test_name, "error": error_msg }) def record_skip(self, impl_name: str, test_name: str, reason: Optional[str] = None) -> None: """Record a skipped test for an implementation.""" if impl_name not in self.results: self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []} self.results[impl_name]["skipped"] += 1 if reason: self.results[impl_name]["errors"].append({ "test": test_name, "error": f"SKIPPED: {reason}" }) def get_winner(self) -> Tuple[Optional[int], Dict]: """Determine the winner based on test results.""" winner = None max_passed = -1 for impl_name, results in self.results.items(): if impl_name == "original_code": continue # Skip original code when determining winner if results["passed"] > max_passed: max_passed = results["passed"] winner = impl_name # Break ties by looking at failure count elif results["passed"] == max_passed and winner is not None: if results["failed"] < self.results[winner]["failed"]: winner = impl_name # Convert winner to numeric index if possible winner_index = -1 if winner and re.match(r'modified_code\d+', winner): try: winner_index = int(re.search(r'(\d+)', winner).group(1)) except (AttributeError, ValueError): pass return winner_index, self.results def save_results(self, filename: str = "test_results.json") -> None: """Save test results to a JSON file.""" import json winner_index, results = self.get_winner() # Check if all tests were skipped all_skipped = all( stats["skipped"] == stats["passed"] + stats["failed"] + stats["skipped"] for impl_name, stats in results.items() if impl_name != "original_code" ) output = { "winner": winner_index, "all_skipped": all_skipped, "results": { name: { "passed": stats["passed"], "failed": stats["failed"], "skipped": stats["skipped"], "total": stats["passed"] + stats["failed"] + stats["skipped"] } for name, stats in results.items() if not name.startswith("_") # Skip internal items } } with open(filename, "w") as f: json.dump(output, f, indent=2) print(f"Test results saved to {filename}") return output
test
null
null
null
null
null
7
python
# Write binary search
binary search in Python
import inspect import pytest import random import time import sys def test_binary_search_function_exists(implementation): """Test if binary_search function exists in the implementation.""" impl_name, module = implementation if impl_name == "original_code": pytest.skip(f"{impl_name}: binary_search function not present in original code") assert hasattr(module, "binary_search"), f"{impl_name}: binary_search function not found" def test_binary_search_signature(implementation): """Test if binary_search has the correct signature.""" impl_name, module = implementation if impl_name == "original_code": pytest.skip(f"{impl_name}: binary_search function not present in original code") sig = inspect.signature(module.binary_search) assert len(sig.parameters) == 2, f"{impl_name}: binary_search should take exactly 2 parameters" def test_binary_search_with_empty_array(implementation): """Test binary_search with an empty array.""" impl_name, module = implementation if impl_name == "original_code": pytest.skip(f"{impl_name}: binary_search function not present in original code") try: result = module.binary_search([], 1) assert result == -1, f"{impl_name}: binary_search should return -1 for empty array" except IndexError: if impl_name == "original_modified_code2": pytest.xfail(f"{impl_name}: binary_search fails with IndexError on empty array") else: assert False, f"{impl_name}: binary_search should handle empty arrays without raising IndexError" def test_binary_search_target_found(implementation): """Test binary_search with an array containing the target.""" impl_name, module = implementation if impl_name == "original_code": pytest.skip(f"{impl_name}: binary_search function not present in original code") test_cases = [ ([1], 1, 0), # Single element array ([1, 2, 3, 4, 5], 1, 0), # Target at beginning ([1, 2, 3, 4, 5], 3, 2), # Target in middle ([1, 2, 3, 4, 5], 5, 4), # Target at end ([1, 3, 5, 7, 9, 11], 7, 3) # Different array values ] for arr, target, expected in test_cases: result = module.binary_search(arr, target) assert result == expected, f"{impl_name}: binary_search returned {result} instead of {expected} for {arr} and target {target}" def test_binary_search_with_duplicates(implementation): """Test binary_search with arrays containing duplicate values.""" impl_name, module = implementation if impl_name == "original_code": pytest.skip(f"{impl_name}: binary_search function not present in original code") test_cases = [ ([1, 1, 2, 2, 3, 3], 2), ([5, 5, 5, 5, 5], 5), ([1, 1, 2, 3, 3, 3, 4, 4], 3) ] for arr, target in test_cases: result = module.binary_search(arr, target) # For arrays with duplicates, we verify the element was found at a valid index assert result != -1, f"{impl_name}: binary_search failed to find existing element {target} in {arr}" assert arr[result] == target, f"{impl_name}: binary_search found wrong element, got {arr[result]} instead of {target}" assert 0 <= result < len(arr), f"{impl_name}: binary_search returned invalid index {result}" def test_binary_search_target_not_found(implementation): """Test binary_search with an array not containing the target.""" impl_name, module = implementation if impl_name == "original_code": pytest.skip(f"{impl_name}: binary_search function not present in original code") test_cases = [ ([1, 2, 3, 4, 5], 6), # Target greater than all elements ([1, 2, 3, 4, 5], 0), # Target less than all elements ([1, 3, 5, 7, 9], 4), # Target between elements ([1, 3, 5, 7, 9], 8), # Target between elements ([10, 20, 30], 25) # Target between wider gaps ] for arr, target in 
test_cases: result = module.binary_search(arr, target) assert result == -1, f"{impl_name}: binary_search should return -1 when target {target} is not found in {arr}, got {result}" def test_binary_search_with_large_arrays(implementation): """Test binary_search with large arrays.""" impl_name, module = implementation if impl_name == "original_code": pytest.skip(f"{impl_name}: binary_search function not present in original code") # Large sorted array test with elements present large_arr = list(range(1000)) # Test multiple targets at different positions targets_to_test = [0, 42, 500, 999] for target in targets_to_test: result = module.binary_search(large_arr, target) assert result == target, f"{impl_name}: binary_search failed with large array, expected {target}, got {result}" # Test target not in array not_in_result = module.binary_search(large_arr, 1000) assert not_in_result == -1, f"{impl_name}: binary_search failed with target not in large array" # Test with negative target when not present not_in_result2 = module.binary_search(large_arr, -1) assert not_in_result2 == -1, f"{impl_name}: binary_search failed with negative target not in large array" def test_binary_search_with_non_integer_elements(implementation): """Test binary_search with arrays of non-integer elements.""" impl_name, module = implementation if impl_name == "original_code": pytest.skip(f"{impl_name}: binary_search function not present in original code") # Test with strings str_arr = ["apple", "banana", "cherry", "date", "elderberry"] str_result = module.binary_search(str_arr, "cherry") assert str_result == 2, f"{impl_name}: binary_search failed with string array, expected 2, got {str_result}" # Test with string not in array str_missing = module.binary_search(str_arr, "fig") assert str_missing == -1, f"{impl_name}: binary_search should return -1 for strings not in array" # Test with floats float_arr = [0.1, 0.2, 0.3, 0.4, 0.5] float_result = module.binary_search(float_arr, 0.3) assert float_result == 2, f"{impl_name}: binary_search failed with float array, expected 2, got {float_result}" # Test with float not in array float_missing = module.binary_search(float_arr, 0.6) assert float_missing == -1, f"{impl_name}: binary_search should return -1 for floats not in array" # Test with custom objects if supported try: # Simple comparable class class ComparableObj: def __init__(self, value): self.value = value def __eq__(self, other): if isinstance(other, ComparableObj): return self.value == other.value return False def __lt__(self, other): if isinstance(other, ComparableObj): return self.value < other.value return NotImplemented obj_arr = [ComparableObj(i) for i in range(5)] target = ComparableObj(3) obj_result = module.binary_search(obj_arr, target) assert obj_result == 3, f"{impl_name}: binary_search should work with comparable objects" except (TypeError, AttributeError): # Skip this part if custom objects aren't supported pass def test_binary_search_edge_cases(implementation): """Test binary_search with edge cases.""" impl_name, module = implementation if impl_name == "original_code": pytest.skip(f"{impl_name}: binary_search function not present in original code") # Test with single element arrays assert module.binary_search([42], 42) == 0, f"{impl_name}: binary_search failed with single element array when target present" assert module.binary_search([42], 43) == -1, f"{impl_name}: binary_search failed with single element array when target not present" # Test with two element arrays assert module.binary_search([1, 2], 1) == 0, 
f"{impl_name}: binary_search failed with two-element array, target at first position" assert module.binary_search([1, 2], 2) == 1, f"{impl_name}: binary_search failed with two-element array, target at second position" assert module.binary_search([1, 2], 3) == -1, f"{impl_name}: binary_search failed with two-element array, target not present" # Test with boundary values (using a smaller value to avoid potential integer overflow) large_num = sys.maxsize // 1000 large_arr = [large_num - 2, large_num - 1, large_num] assert module.binary_search(large_arr, large_num) == 2, f"{impl_name}: binary_search failed with large integer values" # Test with negative values neg_arr = [-10, -5, 0, 5, 10] assert module.binary_search(neg_arr, -5) == 1, f"{impl_name}: binary_search failed with negative values" # Edge case: first and last elements seq_arr = list(range(10)) assert module.binary_search(seq_arr, 0) == 0, f"{impl_name}: binary_search failed finding first element" assert module.binary_search(seq_arr, 9) == 9, f"{impl_name}: binary_search failed finding last element"
pytest pytest-mock
import pytest import os import sys import json from typing import Dict, List, Optional, Any # Import from local test_utils.py in the same directory from test_utils import TestUtils, TestResultsManager # Load all implementations in the current sandbox implementations = TestUtils.load_all_implementations() test_results = TestResultsManager() @pytest.fixture(scope="session") def sandbox_dir(): """Fixture to provide the sandbox directory path.""" return os.path.dirname(os.path.abspath(__file__)) @pytest.fixture(scope="session") def sandbox_name(): """Fixture to provide the sandbox name.""" return os.path.basename(os.path.dirname(os.path.abspath(__file__))) @pytest.fixture(scope="session") def all_implementations(): """Fixture to provide all implementations as a dictionary.""" return implementations @pytest.fixture(params=list(implementations.items())) def implementation(request): """Fixture to provide each implementation to tests one at a time.""" return request.param @pytest.fixture(scope="session") def results_manager(): """Fixture to provide access to the test results manager.""" return test_results # Hook for collecting test results @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): """Pytest hook to collect test results.""" # Execute all other hooks to obtain the report object outcome = yield rep = outcome.get_result() # We're only interested in the call outcome if rep.when == "call": if hasattr(item, "callspec") and "implementation" in item.callspec.params: # Get implementation name and module impl_name, _ = item.callspec.params["implementation"] # Get test name test_name = item.nodeid.split("::")[-1] # Record result if rep.passed: test_results.record_result(impl_name, test_name, True) elif rep.failed: error_msg = str(rep.longrepr) if rep.longrepr else "Test failed" test_results.record_result(impl_name, test_name, False, error_msg) elif rep.skipped: skip_reason = rep.longrepr[2] if rep.longrepr else "Test skipped" test_results.record_skip(impl_name, test_name, skip_reason) # Hook to save results at the end of testing @pytest.hookimpl(trylast=True) def pytest_sessionfinish(session, exitstatus): """Save test results at the end of the test session.""" test_results.save_results()
import os import sys import glob import re import importlib.util import traceback import types from typing import Dict, List, Optional, Any, Tuple class TestUtils: @staticmethod def discover_implementation_files(directory: str = None) -> List[str]: """Find all implementation files in the current sandbox directory.""" if directory is None: directory = os.path.dirname(os.path.abspath(__file__)) patterns = [ r'modified_code\d+\.py', r'new_code\d+\.py', r'original_code\.py', r'implementation\d*\.py' ] pattern = re.compile('|'.join(f'({p})' for p in patterns)) implementations = [] for file_path in glob.glob(os.path.join(directory, '*.py')): if pattern.search(os.path.basename(file_path)): implementations.append(file_path) # Sort files numerically def sort_key(path): filename = os.path.basename(path) match = re.search(r'(\d+)', filename) return int(match.group(1)) if match else 0 return sorted(implementations, key=sort_key) @staticmethod def create_mock_module(file_path: str, module_name: str, error_info: str) -> types.ModuleType: """Create a mock module that contains error information but can still be tested.""" # Create a new module object mock_module = types.ModuleType(module_name) # Add basic attributes mock_module.__file__ = file_path mock_module.__name__ = module_name mock_module.__display_name__ = module_name mock_module.__error__ = error_info # Add a dummy function that can be detected by test functions def dummy_function(*args, **kwargs): return f"Error in module: {error_info}" setattr(mock_module, "implementation_error", dummy_function) return mock_module @staticmethod def load_module(file_path: str, module_name: Optional[str] = None) -> Any: """ Safely load a module from a file path with proper error handling. If the module has errors, return a mock module that can still be tested. 
""" if module_name is None: module_name = os.path.basename(file_path).replace('.py', '') # Create a unique module name to avoid conflicts sandbox_id = os.path.basename(os.path.dirname(file_path)) unique_module_name = f"{sandbox_id}_{module_name}" try: # First, try to read the file to check for syntax errors with open(file_path, 'r') as f: source_code = f.read() # Check for syntax errors by compiling the code try: compiled = compile(source_code, file_path, 'exec') except SyntaxError as e: error_msg = f"Syntax error: {str(e)}" print(f"Syntax error in {file_path}: {e}") print(f" Line {e.lineno}, column {e.offset}: {e.text}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) # Create the module spec spec = importlib.util.spec_from_file_location(unique_module_name, file_path) if spec is None or spec.loader is None: error_msg = f"Could not create spec for {file_path}" print(f"Error: {error_msg}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) # Create the module object module = importlib.util.module_from_spec(spec) sys.modules[unique_module_name] = module # Special handling for execution errors try: # Execute the module code in a safe way spec.loader.exec_module(module) # Store the original name for reference module.__display_name__ = module_name return module except Exception as e: error_msg = f"Runtime error: {str(e)}" traceback_str = traceback.format_exc() print(f"Error executing module {file_path}: {e}") print(traceback_str) # Create a partial module that contains what we loaded before the error mock_module = TestUtils.create_mock_module(file_path, unique_module_name, error_msg) # Copy any attributes that might have been defined before the error for attr_name in dir(module): if not attr_name.startswith('__'): try: setattr(mock_module, attr_name, getattr(module, attr_name)) except Exception: pass # Skip attributes that can't be copied return mock_module except FileNotFoundError as e: error_msg = f"File not found: {str(e)}" print(f"Error: {error_msg}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) except Exception as e: error_msg = f"Unexpected error: {str(e)}" print(f"Error loading module {file_path}: {e}") return TestUtils.create_mock_module(file_path, unique_module_name, error_msg) @classmethod def load_all_implementations(cls, directory: str = None) -> Dict[str, Any]: """Load all implementation files in the directory, including those with errors.""" if directory is None: directory = os.path.dirname(os.path.abspath(__file__)) implementations = {} implementation_files = cls.discover_implementation_files(directory) if not implementation_files: print("WARNING: No implementation files found. 
[Remaining dataset viewer preview rows omitted: each row shows one sample's code, instruction, tests, and supporting files, truncated by the viewer. End of preview.]

EditBench Dataset

This dataset contains code editing tasks extracted from the EditBench evaluation framework, which is designed to evaluate model performance on real-world code edits. It is provided as a test-only benchmark. Each sample includes:

Core Files (Python)

  • original_code.py: Starting code file
  • highlighted_code.py: Specific section of code to be modified
  • instruction.txt: User instructions for the task
  • test_code.py: Tests that validate the implementation

Supporting Files (Python)

  • requirements.txt: Dependencies needed to run the code
  • conftest.py: Pytest configuration
  • test_utils.py: Utilities for testing

Core Files (JavaScript)

  • original_code.js: Starting code file (or .jsx)
  • highlighted_code.js: Specific section of code to be modified
  • instruction.txt: User instructions for the task
  • test_code: Tests that validate the implementation (from tests/*.test.js)
  • package_json: NPM package configuration
  • jest_setup: Jest testing setup (if applicable)
  • babel_config: Babel configuration (if applicable)
  • other_files: Additional files needed for the project

Dataset Statistics

  • Total samples: 113
  • Python samples: 104
  • JavaScript samples: 9
  • Expected samples: 113 (57 easy + 56 hard questions)
  • Found samples: 113 / 113
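
The per-language counts above can be reproduced directly from the loaded split (see the Usage section below for loading). This is a minimal sketch; it assumes the language column is named programming_language and holds lowercase values such as "python" and "javascript" — adjust if the actual schema differs.

from collections import Counter
from datasets import load_dataset

# Assumes a single "test" split and a "programming_language" column.
dataset = load_dataset("your-username/editbench", split="test")
print(Counter(dataset["programming_language"]))
# Should match the statistics above: 104 Python samples and 9 JavaScript samples.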

Usage

This dataset is provided as a test-only benchmark and can be loaded directly with the Hugging Face Datasets library:

from datasets import load_dataset

# Note that this dataset only has a 'test' split
dataset = load_dataset("your-username/editbench", split="test")

Ethical Considerations and Limitations

  • This dataset is provided exclusively for benchmark/evaluation purposes
  • Models should NOT be trained on this dataset, as it is specifically designed to test model capabilities
  • Hugging Face's Terms of Service prohibit using benchmark datasets for training
  • We recommend implementing your model's training pipeline to explicitly exclude this dataset
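
One way to act on the last recommendation is an exact-match decontamination filter over candidate training documents. The sketch below is illustrative only: it assumes the column names used earlier and a simple substring check; production pipelines typically use n-gram or hash-based matching instead.

from datasets import load_dataset

editbench = load_dataset("your-username/editbench", split="test")

# Collect distinctive benchmark strings (illustrative choice: tests and highlighted snippets).
benchmark_snippets = set()
for row in editbench:
    for field in ("test_code", "highlighted_code"):  # assumed column names
        text = row.get(field) or ""
        if len(text) > 100:                          # skip trivially short strings
            benchmark_snippets.add(text)

def is_contaminated(document: str) -> bool:
    """True if a candidate training document contains any benchmark snippet verbatim."""
    return any(snippet in document for snippet in benchmark_snippets)

# Drop contaminated documents before training:
# corpus = [doc for doc in corpus if not is_contaminated(doc)]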

Citation

If you use this dataset, please cite the original EditBench work.

@misc{chi2025editbench,
  title        = {EditBench: Evaluating LLM Abilities to Perform Real-World Code Edits},
  author       = {Wayne Chi and Valerie Chen and Ryan Shar and Aditya Mittal and Jenny Liang and Wei-Lin Chiang and Anastasios Nikolas Angelopoulos and Ion Stoica and Graham Neubig and Ameet Talwalkar and Chris Donahue},
  year         = {2025},
  note         = {arXiv preprint}
}

Usage Restrictions

This dataset is provided for research and evaluation purposes only. By using this dataset, you agree not to:

  1. Train models on it (it is a benchmark dataset)
  2. Scrape or incorporate it into pretraining data
  3. Use it for any purpose other than evaluation