Dataset columns (name | dtype | value stats):
problem_id | int64 | 1–113
programming_language | stringclasses | 2 values
original_code | stringlengths | 0–29.4k
highlighted_code | stringlengths | 0–6.05k (contains nulls)
instruction | stringlengths | 5–5.17k
test_code | stringlengths | 553–29.5k
requirements | stringlengths | 18–122 (contains nulls)
conftest | stringclasses | 3 values
test_utils | stringclasses | 7 values
split | stringclasses | 1 value
package_json | stringclasses | 9 values
jest_setup | stringclasses | 9 values
babel_config | stringclasses | 5 values
other_files | dict
jest_dom_setup | stringclasses | 1 value
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
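The rows below are raw per-problem records. As a minimal sketch, assuming this dump is published as a Hugging Face dataset (the id "org/code-edit-bench" is a placeholder, not the real name), the records could be iterated like this:

from datasets import load_dataset

ds = load_dataset("org/code-edit-bench", split="test")  # placeholder id; "test" matches the split column
for row in ds:
    # Each record pairs starter code, an edit instruction, and a pytest suite for grading modified implementations.
    print(row["problem_id"], row["programming_language"], row["instruction"][:60])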
101 | python | import json
import random
from groq import Groq
import os
from dotenv import load_dotenv
load_dotenv()
api_key = os.getenv("GROQ_API_KEY")
client = Groq(api_key=api_key)
class SCP_Object:
def __init__(self, name, description, triggers, initial_state):
self.name = name
self.description = description
self.triggers = triggers
self.state = initial_state
class D_Personnel:
def __init__(self, name, initial_state):
self.name = name
self.state = initial_state
self.memory = [] # List of strings, what he remembers from experiments
class Room:
def __init__(self, name, description, initial_state):
self.name = name
self.description = description
self.state = initial_state
class Game:
def __init__(self):
self.scp_objects = self.load_scp_objects()
self.current_scp = random.choice(self.scp_objects)
self.d_personnel = D_Personnel("D-" + str(random.randint(1000, 9999)), initial_state="calm")
self.room = Room("Test Chamber", "A standard containment chamber.", initial_state="clean")
self.player_report = ""
self.experiment_log = []
def load_scp_objects(self):
# Example SCP objects, can be loaded from a file later
return [
SCP_Object(
name="SCP-173",
description="SCP-173 is a concrete statue that moves when not directly observed.",
triggers={
"not_observed": "SCP-173 moves quickly towards the nearest person.",
"touch": "SCP-173 does nothing.",
},
initial_state="immobile"
),
SCP_Object(
name="SCP-096",
description="SCP-096 is a humanoid creature that becomes extremely hostile when its face is viewed.",
triggers={
"view_face": "SCP-096 will scream and chase the viewer.",
"touch": "SCP-096 does nothing.",
},
initial_state="docile"
),
SCP_Object(
name="SCP-999",
description="SCP-999 is a large, amorphous, gelatinous mass of translucent orange slime with a consistency similar to that of peanut butter. SCP-999's behavior is best described as playful and dog-like.",
triggers={
"touch": "SCP-999 will make a happy sound and try to hug the person.",
"attack": "SCP-999 will try to hug the person.",
},
initial_state="happy"
)
]
def llm_request(self, system_prompt, user_prompt):
completion = client.chat.completions.create(
model="llama3-8b-8192",
messages=[
{
"role": "system",
"content": system_prompt
},
{
"role": "user",
"content": user_prompt
},
],
temperature=0.02,
max_tokens=1024,
top_p=1,
stream=False,
response_format={"type": "json_object"},
stop=None,
)
try:
return json.loads(completion.choices[0].message.content)
except json.JSONDecodeError:
print(f"Error decoding JSON: {completion.choices[0].message.content}")
return {"message": "Error decoding JSON", "intention": "error"}
def check_instruction_clarity(self, instruction):
system_prompt = "You are a helpful assistant that checks if the instruction to people is clear and understandable. Provide a json response with the following fields: 'reasoning' (a step-by-step analysis of the instruction), 'decision' (True if the instruction is clear, False otherwise), and 'why' (a brief explanation of your decision)."
user_prompt = f"Analyze the clarity of this instruction: {instruction}"
response = self.llm_request(system_prompt, user_prompt)
return response
def check_instruction_possibility(self, instruction):
system_prompt = "You are a helpful assistant that checks if the instruction is physically possible. Provide a json response with the following fields: 'reasoning' (a step-by-step analysis of the instruction's feasibility), 'decision' (True if the instruction is possible, False otherwise), and 'why' (a brief explanation of your decision)."
user_prompt = f"Analyze the possibility of this instruction: {instruction}. Current room state: {self.room.state}"
response = self.llm_request(system_prompt, user_prompt)
return response
def check_d_willingness(self, instruction):
system_prompt = "You are a D-class personnel in a test chamber with an unknown SCP object. Provide a json response with the following fields: 'reasoning' (a step-by-step analysis of your willingness), 'decision' (True if you agree to perform the instruction, False otherwise), and 'why' (a brief explanation of your decision)."
user_prompt = f"Will you perform this instruction? Instruction: {instruction}. Your current state: {self.d_personnel.state}. Your current knowledge about SCP: {self.d_personnel.memory}"
response = self.llm_request(system_prompt, user_prompt)
return response
def emulate_experiment(self, instruction):
self.experiment_log.append(f"Instruction: {instruction}")
# Initial checks
clarity_check = self.check_instruction_clarity(instruction)
if clarity_check["decision"] == "False":
self.experiment_log.append(f"Instruction not clear: {clarity_check['why']}")
return f"Instruction not clear. Please clarify. Here is why: {clarity_check['why']}"
possibility_check = self.check_instruction_possibility(instruction)
if possibility_check["decision"] == "False":
self.experiment_log.append(f"Instruction impossible: {possibility_check['why']}")
return f"Instruction impossible. Please provide a possible instruction. Here is why: {clarity_check['why']}"
willingness_check = self.check_d_willingness(instruction)
if willingness_check["decision"] == "False":
self.experiment_log.append(f"D-personnel refused: {willingness_check['why']}")
return f"D-personnel refused. Reason: {willingness_check['why']}"
self.experiment_log.append("All checks passed. Starting emulation.")
# Emulation loop
current_actor = "d_personnel"
count_of_iterations = 0
action_history = [instruction] # Start with the initial instruction
while True and count_of_iterations < 5:
count_of_iterations += 1
if current_actor == "d_personnel":
actions = self.generate_possible_actions(action_history, self.d_personnel)
if not actions:
self.experiment_log.append("No possible actions for D-personnel.")
break
chosen_action = self.choose_action(actions)
self.experiment_log.append(f"D-personnel action: {chosen_action}")
outcomes = self.generate_outcomes(chosen_action, self.d_personnel, self.current_scp, self.room)
self.experiment_log.append(f"Outcomes: {outcomes}")
self.apply_outcomes(outcomes)
action_history.append({"d_personnel": {"action": chosen_action, "outcomes": outcomes}})
current_actor = "scp"
elif current_actor == "scp":
scp_actions = self.generate_possible_actions(action_history, self.current_scp)
if not scp_actions:
self.experiment_log.append("No possible actions for SCP.")
break
chosen_scp_action = self.choose_action(scp_actions)
self.experiment_log.append(f"SCP action: {chosen_scp_action}")
scp_outcomes = self.generate_outcomes(chosen_scp_action, self.d_personnel, self.current_scp, self.room)
self.experiment_log.append(f"SCP Outcomes: {scp_outcomes}")
self.apply_outcomes(scp_outcomes)
action_history.append({"scp": {"action": chosen_scp_action, "outcomes": scp_outcomes}})
current_actor = "d_personnel"
def generate_possible_actions(self, action_history, actor):
if isinstance(actor, D_Personnel):
system_prompt = """You are a helpful assistant that generates possible actions for D-class personnel.
Answer in json format in format: {actions: ["action1", "action2", ... ]}.
Generate 3-5 possible actions based on the instruction and action history."""
user_prompt = f"""Generate possible actions for D-class personnel based on this history:
Initial instruction: {action_history[0]}
Action history: {action_history[1:]}
Current state: {actor.state}
Current knowledge about SCP: {actor.memory}"""
response = self.llm_request(system_prompt, user_prompt)
if "actions" in response:
return response["actions"]
else:
return []
elif isinstance(actor, SCP_Object):
def choose_action(self, actions):
if not actions:
return None
return random.choice(actions)
def generate_outcomes(self, action, d_personnel, scp_object, room):
system_prompt = "You are a helpful assistant that generates possible outcomes of an action. Answer in json format in format: {outcomes: [{\"description\": \"\", \"d_personnel_state\": \"\", \"scp_state\": \"\", \"room_state\": \"\"}, ... ]}. Generate 3-5 possible outcomes based on the action and current state."
user_prompt = f"Generate possible outcomes for this action: {action}. D-personnel state: {d_personnel.state}. SCP state: {scp_object.state}. Room state: {room.state}. SCP description: {scp_object.description}. SCP triggers: {scp_object.triggers}"
response = self.llm_request(system_prompt, user_prompt)
if "outcomes" in response:
return response["outcomes"]
else:
return []
def apply_outcomes(self, outcomes):
if not outcomes:
return
chosen_outcome = random.choice(outcomes)
self.experiment_log.append(f"Chosen outcome: {chosen_outcome}")
if "d_personnel_state" in chosen_outcome:
self.d_personnel.state = chosen_outcome["d_personnel_state"]
if "scp_state" in chosen_outcome:
self.current_scp.state = chosen_outcome["scp_state"]
if "room_state" in chosen_outcome:
self.room.state = chosen_outcome["room_state"]
def get_d_report(self):
if self.d_personnel.state == "dead":
return "D-personnel is dead. No report available."
system_prompt = "You are a D-class personnel. You need to describe what happened during the experiment. Answer in json format in format: {report: \"\"}. Describe what you remember from the experiment."
user_prompt = f"Describe what happened during the experiment. Your current state: {self.d_personnel.state}. Your current knowledge about SCP: {self.d_personnel.memory}. Experiment log: {self.experiment_log}"
response = self.llm_request(system_prompt, user_prompt)
if "report" in response:
self.d_personnel.memory.append(response["report"])
return response["report"]
else:
return "No report available."
def get_d_death_report(self):
if self.d_personnel.state != "dead":
return "D-personnel is alive. No death report available."
system_prompt = "You are a forensic expert. You need to describe the state of the dead D-class personnel. Answer in json format in format: {report: \"\"}. Describe the state of the body."
user_prompt = f"Describe the state of the dead D-class personnel. Experiment log: {self.experiment_log}"
response = self.llm_request(system_prompt, user_prompt)
if "report" in response:
return response["report"]
else:
return "No death report available."
def start_experiment(self, instruction):
self.experiment_log = []
result = self.emulate_experiment(instruction)
if result:
return result
if self.d_personnel.state == "dead":
report = self.get_d_death_report()
else:
report = self.get_d_report()
self.d_personnel = D_Personnel("D-" + str(random.randint(1000, 9999)), initial_state="calm")
return report
def submit_report(self, player_report):
self.player_report = player_report
system_prompt = "You are a helpful assistant that checks if the player report is factually correct. Answer in json format in format: {message: \"\", score: 0-100}. Compare the player report with the SCP description and triggers. Score should be 0 if the report is completely wrong and 100 if the report is completely correct."
user_prompt = f"Compare the player report with the SCP description and triggers. Player report: {player_report}. SCP description: {self.current_scp.description}. SCP triggers: {self.current_scp.triggers}"
response = self.llm_request(system_prompt, user_prompt)
return response
def play(self):
print("Welcome to SCPIE!")
instruction = input("Enter instruction for D-personnel: ")
result = self.start_experiment(instruction)
print("Experiment result:", result)
print("\n\n\n")
for log in self.experiment_log:
if isinstance(log, dict):
print(json.dumps(log, indent=4, ensure_ascii=False))
else:
print(log)
print()
# print(self.experiment_log)
if __name__ == "__main__":
game = Game()
game.play()
# print("Yes")
| def emulate_experiment(self, instruction):
self.experiment_log.append(f"Instruction: {instruction}")
# Initial checks
clarity_check = self.check_instruction_clarity(instruction)
if clarity_check["decision"] == "False":
self.experiment_log.append(f"Instruction not clear: {clarity_check['why']}")
return f"Instruction not clear. Please clarify. Here is why: {clarity_check['why']}"
possibility_check = self.check_instruction_possibility(instruction)
if possibility_check["decision"] == "False":
self.experiment_log.append(f"Instruction impossible: {possibility_check['why']}")
return f"Instruction impossible. Please provide a possible instruction. Here is why: {clarity_check['why']}"
willingness_check = self.check_d_willingness(instruction)
if willingness_check["decision"] == "False":
self.experiment_log.append(f"D-personnel refused: {willingness_check['why']}")
return f"D-personnel refused. Reason: {willingness_check['why']}"
self.experiment_log.append("All checks passed. Starting emulation.")
# Emulation loop
current_actor = "d_personnel"
count_of_iterations = 0
action_history = [instruction] # Start with the initial instruction
while True and count_of_iterations < 5:
count_of_iterations += 1
if current_actor == "d_personnel":
actions = self.generate_possible_actions(action_history, self.d_personnel)
if not actions:
self.experiment_log.append("No possible actions for D-personnel.")
break
chosen_action = self.choose_action(actions)
self.experiment_log.append(f"D-personnel action: {chosen_action}")
outcomes = self.generate_outcomes(chosen_action, self.d_personnel, self.current_scp, self.room)
self.experiment_log.append(f"Outcomes: {outcomes}")
self.apply_outcomes(outcomes)
action_history.append({"d_personnel": {"action": chosen_action, "outcomes": outcomes}})
current_actor = "scp"
elif current_actor == "scp":
scp_actions = self.generate_possible_actions(action_history, self.current_scp)
if not scp_actions:
self.experiment_log.append("No possible actions for SCP.")
break
chosen_scp_action = self.choose_action(scp_actions)
self.experiment_log.append(f"SCP action: {chosen_scp_action}")
scp_outcomes = self.generate_outcomes(chosen_scp_action, self.d_personnel, self.current_scp, self.room)
self.experiment_log.append(f"SCP Outcomes: {scp_outcomes}")
self.apply_outcomes(scp_outcomes)
action_history.append({"scp": {"action": chosen_scp_action, "outcomes": scp_outcomes}})
current_actor = "d_personnel"
def generate_possible_actions(self, action_history, actor):
if isinstance(actor, D_Personnel):
system_prompt = """You are a helpful assistant that generates possible actions for D-class personnel.
Answer in json format in format: {actions: ["action1", "action2", ... ]}.
Generate 3-5 possible actions based on the instruction and action history."""
user_prompt = f"""Generate possible actions for D-class personnel based on this history:
Initial instruction: {action_history[0]}
Action history: {action_history[1:]}
Current state: {actor.state}
Current knowledge about SCP: {actor.memory}"""
response = self.llm_request(system_prompt, user_prompt)
if "actions" in response:
return response["actions"]
else:
return []
elif isinstance(actor, SCP_Object):
| continue this function with SCP object prompting, just like D_Personnel | import ast
import inspect
import pytest
def extract_generate_possible_actions_branches(module_code, d_class_name, scp_class_name):
"""Extract the AST bodies of the D_Personnel and SCP_Object branches in generate_possible_actions."""
try:
parsed = ast.parse(module_code)
except SyntaxError as e:
print(f"SyntaxError while parsing module code: {e}")
return None, None
except Exception as e:
print(f"Unexpected error while parsing module code: {e}")
return None, None
for node in ast.walk(parsed):
if isinstance(node, ast.ClassDef) and node.name == "Game":
for item in node.body:
if isinstance(item, ast.FunctionDef) and item.name == "generate_possible_actions":
d_branch = None
scp_branch = None
for subnode in ast.walk(item):
if isinstance(subnode, ast.If):
test = subnode.test
if (
isinstance(test, ast.Call)
and isinstance(test.func, ast.Name)
and test.func.id == "isinstance"
and isinstance(test.args[1], ast.Name)
):
class_name = test.args[1].id
if class_name == d_class_name:
d_branch = subnode.body
elif class_name == scp_class_name:
scp_branch = subnode.body
return d_branch, scp_branch
return None, None
def ast_structure_summary(ast_nodes):
"""Extracts structure summary from AST nodes to compare similarity."""
summary = []
for node in ast_nodes:
if isinstance(node, ast.Assign) and isinstance(node.value, ast.Constant):
summary.append(("assign", node.targets[0].id, type(node.value.value).__name__))
elif isinstance(node, ast.Assign):
summary.append(("assign", node.targets[0].id, type(node.value).__name__))
elif isinstance(node, ast.Expr) and isinstance(node.value, ast.Call):
summary.append(("call", getattr(node.value.func, 'id', 'unknown')))
elif isinstance(node, ast.Return):
summary.append(("return",))
elif isinstance(node, ast.If):
summary.append(("if",))
else:
summary.append((type(node).__name__,))
return summary
def test_generate_possible_actions_structure_similarity(implementation):
impl_name, module = implementation
module_code = inspect.getsource(module)
# Extract the class names to match against isinstance checks
d_class_name = "D_Personnel"
scp_class_name = "SCP_Object"
# Get the AST branches
d_branch, scp_branch = extract_generate_possible_actions_branches(module_code, d_class_name, scp_class_name)
assert d_branch is not None, "Could not extract D_Personnel branch"
assert scp_branch is not None, "Could not extract SCP_Object branch"
# Compare structure
d_summary = ast_structure_summary(d_branch)
scp_summary = ast_structure_summary(scp_branch)
assert d_summary == scp_summary, f"Mismatch in structure:\nD: {d_summary}\nSCP: {scp_summary}"
| pytest
pytest-mock
groq
python-dotenv | import pytest
import os
import sys
import json
from typing import Dict, List, Optional, Any
# Import from local test_utils.py in the same directory
from test_utils import TestUtils, TestResultsManager
# Load all implementations in the current sandbox
implementations = TestUtils.load_all_implementations()
test_results = TestResultsManager()
@pytest.fixture(scope="session")
def sandbox_dir():
"""Fixture to provide the sandbox directory path."""
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope="session")
def sandbox_name():
"""Fixture to provide the sandbox name."""
return os.path.basename(os.path.dirname(os.path.abspath(__file__)))
@pytest.fixture(scope="session")
def all_implementations():
"""Fixture to provide all implementations as a dictionary."""
return implementations
@pytest.fixture(params=list(implementations.items()))
def implementation(request):
"""Fixture to provide each implementation to tests one at a time."""
return request.param
@pytest.fixture(scope="session")
def results_manager():
"""Fixture to provide access to the test results manager."""
return test_results
# Hook for collecting test results
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Pytest hook to collect test results."""
# Execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# We're only interested in the call outcome
if rep.when == "call":
if hasattr(item, "callspec") and "implementation" in item.callspec.params:
# Get implementation name and module
impl_name, _ = item.callspec.params["implementation"]
# Get test name
test_name = item.nodeid.split("::")[-1]
# Record result
if rep.passed:
test_results.record_result(impl_name, test_name, True)
elif rep.failed:
error_msg = str(rep.longrepr) if rep.longrepr else "Test failed"
test_results.record_result(impl_name, test_name, False, error_msg)
elif rep.skipped:
skip_reason = rep.longrepr[2] if rep.longrepr else "Test skipped"
test_results.record_skip(impl_name, test_name, skip_reason)
# Hook to save results at the end of testing
@pytest.hookimpl(trylast=True)
def pytest_sessionfinish(session, exitstatus):
"""Save test results at the end of the test session."""
test_results.save_results() | import os
import sys
import glob
import re
import importlib.util
import traceback
import types
from typing import Dict, List, Optional, Any, Tuple
class TestUtils:
@staticmethod
def discover_implementation_files(directory: str = None) -> List[str]:
"""Find all implementation files in the current sandbox directory."""
if directory is None:
directory = os.path.dirname(os.path.abspath(__file__))
patterns = [
r'modified_code\d+\.py',
r'new_code\d+\.py',
# r'original_code\.py',
r'implementation\d*\.py'
]
pattern = re.compile('|'.join(f'({p})' for p in patterns))
implementations = []
for file_path in glob.glob(os.path.join(directory, '*.py')):
if pattern.search(os.path.basename(file_path)):
implementations.append(file_path)
# Sort files numerically
def sort_key(path):
filename = os.path.basename(path)
match = re.search(r'(\d+)', filename)
return int(match.group(1)) if match else 0
return sorted(implementations, key=sort_key)
@staticmethod
def create_mock_module(file_path: str, module_name: str, error_info: str) -> types.ModuleType:
"""Create a mock module that contains error information but can still be tested."""
# Create a new module object
mock_module = types.ModuleType(module_name)
# Add basic attributes
mock_module.__file__ = file_path
mock_module.__name__ = module_name
mock_module.__display_name__ = module_name
mock_module.__error__ = error_info
# Add a dummy function that can be detected by test functions
def dummy_function(*args, **kwargs):
return f"Error in module: {error_info}"
setattr(mock_module, "implementation_error", dummy_function)
return mock_module
@staticmethod
def load_module(file_path: str, module_name: Optional[str] = None) -> Any:
"""
Safely load a module from a file path with proper error handling.
If the module has errors, return a mock module that can still be tested.
"""
if module_name is None:
module_name = os.path.basename(file_path).replace('.py', '')
# Create a unique module name to avoid conflicts
sandbox_id = os.path.basename(os.path.dirname(file_path))
unique_module_name = f"{sandbox_id}_{module_name}"
try:
# First, try to read the file to check for syntax errors
with open(file_path, 'r') as f:
source_code = f.read()
# Check for syntax errors by compiling the code
try:
compiled = compile(source_code, file_path, 'exec')
except SyntaxError as e:
error_msg = f"Syntax error: {str(e)}"
print(f"Syntax error in {file_path}: {e}")
print(f" Line {e.lineno}, column {e.offset}: {e.text}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
# Create the module spec
spec = importlib.util.spec_from_file_location(unique_module_name, file_path)
if spec is None or spec.loader is None:
error_msg = f"Could not create spec for {file_path}"
print(f"Error: {error_msg}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
# Create the module object
module = importlib.util.module_from_spec(spec)
sys.modules[unique_module_name] = module
# Special handling for execution errors
try:
# Execute the module code in a safe way
spec.loader.exec_module(module)
# Store the original name for reference
module.__display_name__ = module_name
return module
except Exception as e:
error_msg = f"Runtime error: {str(e)}"
traceback_str = traceback.format_exc()
print(f"Error executing module {file_path}: {e}")
print(traceback_str)
# Create a partial module that contains what we loaded before the error
mock_module = TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
# Copy any attributes that might have been defined before the error
for attr_name in dir(module):
if not attr_name.startswith('__'):
try:
setattr(mock_module, attr_name, getattr(module, attr_name))
except Exception:
pass # Skip attributes that can't be copied
return mock_module
except FileNotFoundError as e:
error_msg = f"File not found: {str(e)}"
print(f"Error: {error_msg}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
except Exception as e:
error_msg = f"Unexpected error: {str(e)}"
print(f"Error loading module {file_path}: {e}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
@classmethod
def load_all_implementations(cls, directory: str = None) -> Dict[str, Any]:
"""Load all implementation files in the directory, including those with errors."""
if directory is None:
directory = os.path.dirname(os.path.abspath(__file__))
implementations = {}
implementation_files = cls.discover_implementation_files(directory)
if not implementation_files:
print("WARNING: No implementation files found. Check your file naming patterns.")
for file_path in implementation_files:
module_name = os.path.basename(file_path).replace('.py', '')
module = cls.load_module(file_path, module_name)
# Always add the module, even if it has errors
implementations[module_name] = module
if hasattr(module, '__error__'):
print(f"Loaded with errors: {module_name} - {module.__error__}")
else:
print(f"Successfully loaded: {module_name}")
return implementations
class TestResultsManager:
def __init__(self):
self.results = {}
self.sandbox_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
def record_result(self, impl_name: str, test_name: str, passed: bool,
error_msg: Optional[str] = None) -> None:
"""Record a test result for an implementation."""
if impl_name not in self.results:
self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []}
if passed:
self.results[impl_name]["passed"] += 1
else:
self.results[impl_name]["failed"] += 1
if error_msg:
self.results[impl_name]["errors"].append({
"test": test_name,
"error": error_msg
})
def record_skip(self, impl_name: str, test_name: str,
reason: Optional[str] = None) -> None:
"""Record a skipped test for an implementation."""
if impl_name not in self.results:
self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []}
self.results[impl_name]["skipped"] += 1
if reason:
self.results[impl_name]["errors"].append({
"test": test_name,
"error": f"SKIPPED: {reason}"
})
def get_winner(self) -> Tuple[Optional[int], Dict]:
"""Determine the winner based on test results."""
winner = None
max_passed = -1
for impl_name, results in self.results.items():
if impl_name == "original_code":
continue # Skip original code when determining winner
if results["passed"] > max_passed:
max_passed = results["passed"]
winner = impl_name
# Break ties by looking at failure count
elif results["passed"] == max_passed and winner is not None:
if results["failed"] < self.results[winner]["failed"]:
winner = impl_name
# Convert winner to numeric index if possible
winner_index = -1
if winner and re.match(r'modified_code\d+', winner):
try:
winner_index = int(re.search(r'(\d+)', winner).group(1))
except (AttributeError, ValueError):
pass
return winner_index, self.results
def save_results(self, filename: str = "test_results.json") -> None:
"""Save test results to a JSON file."""
import json
winner_index, results = self.get_winner()
# Check if all tests were skipped
all_skipped = all(
stats["skipped"] == stats["passed"] + stats["failed"] + stats["skipped"]
for impl_name, stats in results.items()
if impl_name != "original_code"
)
output = {
"winner": winner_index,
"all_skipped": all_skipped,
"results": {
name: {
"passed": stats["passed"],
"failed": stats["failed"],
"skipped": stats["skipped"],
"total": stats["passed"] + stats["failed"] + stats["skipped"]
}
for name, stats in results.items()
if not name.startswith("_") # Skip internal items
}
}
with open(filename, "w") as f:
json.dump(output, f, indent=2)
print(f"Test results saved to {filename}")
return output | test | null | null | null | null | null |
102 | python | import pandas as pd
import os
import random
import torch
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import precision_score, recall_score
from torch.nn import functional as F
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
import seaborn as sns
from colpali_engine.interpretability import (
get_similarity_maps_from_embeddings,
plot_all_similarity_maps,
)
from transformers import AutoModel, AutoProcessor  # needed by get_embeddings below
# Path to extracted Flickr8k dataset
FLICKR8K_IMAGES_PATH = "flickr8k/Images"
FLICKR8K_CAPTIONS_PATH = "flickr8k/captions.txt"
# Function to load image-text pairs from Flickr8k
def load_flickr8k_data(images_path, captions_path, fraction=0.1):
# Read captions file
with open(captions_path, "r") as f:
captions_data = f.readlines()[1:] # Skip header
# Parse captions
image_text_pairs = {}
for line in captions_data:
image_name, caption = line.strip().split(",", 1)
if image_name not in image_text_pairs:
image_text_pairs[image_name] = []
image_text_pairs[image_name].append(caption)
# Load only a fraction of the dataset
selected_images = random.sample(
list(image_text_pairs.keys()), int(len(image_text_pairs) * fraction)
)
image_text_pairs = {k: image_text_pairs[k] for k in selected_images}
# Create pairs of images and captions
pairs = []
for image_name, captions in image_text_pairs.items():
image_path = os.path.join(images_path, image_name)
if os.path.exists(image_path):
pairs.append((Image.open(image_path), random.choice(captions)))
return pairs
# Function to create unrelated pairs
def create_unrelated_pairs(image_text_pairs):
"""
Creates unrelated pairs of images and texts by randomly shuffling the texts.
Args:
image_text_pairs (list): A list of tuples containing images and their corresponding texts.
Returns:
list: A list of tuples containing images and unrelated texts.
"""
images, texts = zip(*image_text_pairs)
unrelated_texts = random.sample(texts, len(texts))
return list(zip(images, unrelated_texts))
def create_visual_pairs(image_text_pairs):
"""
Creates pairs of original and augmented images from image-text pairs.
This function takes a list of image-text pairs and creates new pairs consisting
of the original images and their augmented versions. The augmentation used
in this implementation is a horizontal flip.
Args:
image_text_pairs (list): A list of tuples containing (image, text) pairs,
where images are PIL Image objects and texts are strings.
Returns:
list: A list of tuples containing (original_image, augmented_image) pairs, where the
original is a PIL Image and the augmented version is a torch.Tensor of the horizontally flipped image.
"""
from torchvision.transforms import ToTensor
images, _ = zip(*image_text_pairs)
# Example augmentation: horizontal flip
augmented_images = [ToTensor()(image).flip(-1) for image in images]
return list(zip(images, augmented_images))
def get_embeddings(images, texts, model_id="google/siglip-base-patch16-224"):
"""
Given lists of images and texts, returns normalized embeddings for both.
"""
# Ensure texts is a list of strings
if not all(isinstance(t, str) for t in texts):
raise ValueError("All text inputs must be strings.")
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModel.from_pretrained(
model_id, ignore_mismatched_sizes=True).to(device)
processor = AutoProcessor.from_pretrained(model_id)
# Preprocess images and texts
image_inputs = processor(images=images, return_tensors="pt").to(device)
text_inputs = processor(text=texts, return_tensors="pt", padding="max_length").to(
device
)
with torch.no_grad():
image_embeds = model.get_image_features(**image_inputs)
text_embeds = model.get_text_features(**text_inputs)
# Normalize embeddings
image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
return image_embeds, text_embeds
def cosine_similarity_analysis(embeddings1, embeddings2, title):
"""
Computes cosine similarity for matching and unrelated pairs and compares distributions.
"""
similarities = cosine_similarity(
embeddings1.cpu().numpy(), embeddings2.cpu().numpy()
)
# Matching pairs: Diagonal of the similarity matrix
matching_similarities = np.diag(similarities)
# Unrelated pairs: Off-diagonal similarities
unrelated_similarities = similarities[~np.eye(
similarities.shape[0], dtype=bool)]
print(f"### {title} ###")
print(f"Mean Matching Similarity: {np.mean(matching_similarities):.4f}")
print(f"Mean Unrelated Similarity: {np.mean(unrelated_similarities):.4f}")
print()
# Plot distributions
plt.figure(figsize=(10, 6))
sns.histplot(
matching_similarities, kde=True, label="Matching Pairs", color="blue", bins=30
)
sns.histplot(
unrelated_similarities, kde=True, label="Unrelated Pairs", color="red", bins=30
)
plt.title(f"{title}: Cosine Similarity Distributions")
plt.xlabel("Cosine Similarity")
plt.ylabel("Frequency")
plt.legend()
plt.show()
# b. Nearest-Neighbor Retrieval
def retrieval_metrics(query_embeds, target_embeds, ground_truth_indices, k=5):
"""
Computes Precision@k and Recall@k for nearest-neighbor retrieval.
This function evaluates the effectiveness of retrieval by calculating Precision@k and Recall@k.
Precision@k measures the accuracy of the top-k retrieved items, while Recall@k measures the ability
to find the relevant item within the top-k retrieved items. It assumes there's only one true
match per query.
Args:
query_embeds (torch.Tensor): Embeddings of the query data.
target_embeds (torch.Tensor): Embeddings of the target data (database).
ground_truth_indices (list): List of indices in the target data representing the true matches for each query.
k (int): The number of top results to consider.
Returns:
tuple: A tuple containing mean Precision@k and mean Recall@k.
"""
similarities = cosine_similarity(
query_embeds.cpu().numpy(), target_embeds.cpu().numpy()
)
sorted_indices = np.argsort(-similarities, axis=1)[:, :k] # Top-k indices
# Compute metrics
precisions = []
recalls = []
for i, true_idx in enumerate(ground_truth_indices):
retrieved_indices = sorted_indices[i]
true_positives = int(true_idx in retrieved_indices)
precisions.append(true_positives / k)
recalls.append(true_positives / 1) # Only one true match per query
mean_precision = np.mean(precisions)
mean_recall = np.mean(recalls)
return mean_precision, mean_recall
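# Illustrative usage (hypothetical values, commented out): with identical query and
# target embeddings such as torch.eye(4), each query's true match is retrieved first, so
#   retrieval_metrics(torch.eye(4), torch.eye(4), [0, 1, 2, 3], k=4)
# yields mean Precision@4 = 0.25 (one hit among four retrieved) and mean Recall@4 = 1.0.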
def plot_query_token_importance(
pil_image, similarity_maps, query_tokens, alpha: float = 0.5
) -> None:
"""
Plot a separate heatmap for each query token in the similarity_maps.
Args:
pil_image (PIL.Image.Image): The original image (e.g., loaded via Image.open(...)).
similarity_maps (torch.Tensor):
Shape = (num_query_tokens, n_patches_x, n_patches_y).
query_tokens (List[str]): A list of strings for each token in the query.
alpha (float): Transparency for the heatmap overlays (0=transparent, 1=opaque).
"""
# Convert PIL to numpy
image_np = np.array(pil_image)
H, W = image_np.shape[:2]
num_tokens = similarity_maps.size(0)
assert num_tokens == len(query_tokens), (
f"The number of query tokens in similarity_maps ({num_tokens}) "
f"doesn't match the length of query_tokens list ({len(query_tokens)})."
)
fig, axs = plt.subplots(1, num_tokens, figsize=(5 * num_tokens, 5))
if num_tokens == 1:
# If there's only one token, axs won't be an iterable
axs = [axs]
for idx in range(num_tokens):
# Each similarity_map for a single query token: shape = (n_patches_x, n_patches_y)
single_map = similarity_maps[idx] # (n_patches_x, n_patches_y)
# Upsample to full image size
single_map_4d = single_map.unsqueeze(0).unsqueeze(
0
) # (1,1,n_patches_x, n_patches_y)
upsampled = F.interpolate(
single_map_4d, size=(H, W), mode="bilinear", align_corners=False
)
# .to(torch.float32) fix if your map is bfloat16
heatmap = upsampled.squeeze().to(torch.float32).cpu().numpy() # (H, W)
# Optionally normalize heatmap (uncomment if desired)
# heatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min() + 1e-8)
# Plot
axs[idx].imshow(image_np, cmap=None if image_np.ndim == 3 else "gray")
axs[idx].imshow(heatmap, cmap="jet", alpha=alpha)
axs[idx].set_title(f"Query: {query_tokens[idx]}")
axs[idx].axis("off")
plt.tight_layout()
plt.show()
def get_maps_and_embeds(
batch_images, batch_queries, model, processor, image, use_qwen=False
):
"""
Computes similarity maps and embeddings from a batch of images and queries using the specified model and processor.
Args:
batch_images (dict): A dictionary of batched image inputs processed by the processor.
batch_queries (dict): A dictionary of batched query inputs processed by the processor.
model (nn.Module): The model used for computing embeddings.
processor (Processor): The processor responsible for image and text preprocessing.
Returns:
tuple: A tuple containing:
- original_maps (torch.Tensor): Similarity maps between images and queries
with shape (num_queries, n_patches_x, n_patches_y).
- original_image_embeddings (torch.Tensor): Embeddings of the input images.
- original_query_embeddings (torch.Tensor): Embeddings of the input queries.
"""
with torch.no_grad():
original_image_embeddings = model.forward(**batch_images)
original_query_embeddings = model.forward(**batch_queries)
if use_qwen:
n_patches = processor.get_n_patches(
image_size=image.size,
patch_size=model.patch_size,
spatial_merge_size=model.spatial_merge_size,
)
else:
n_patches = processor.get_n_patches(
image_size=image.size, patch_size=model.patch_size
)
image_mask = processor.get_image_mask(batch_images)
# Compute original similarity maps
original_batched_maps = get_similarity_maps_from_embeddings(
image_embeddings=original_image_embeddings,
query_embeddings=original_query_embeddings,
n_patches=n_patches,
image_mask=image_mask,
)
# (query_length, n_patches_x, n_patches_y)
original_maps = original_batched_maps[0].permute(0, 2, 1).contiguous()
return original_maps, original_image_embeddings, original_query_embeddings
def visualize_token_map(
image,
original_maps,
token_list,
token_index=2,
cmap="Greens",
figsize=(15, 2),
show_text=True,
):
"""
Visualize a token's attention map in three ways: the original image, the raw attention map with numerical values,
and an overlay of the attention map on the original image.
Args:
image (PIL.Image): The input image to visualize.
original_maps (torch.Tensor or np.ndarray): Attention maps with shape (num_tokens, height, width).
token_list (list[str]): List of token strings corresponding to each attention map.
token_index (int, optional): Index of the token/map to visualize. Defaults to 2.
cmap (str, optional): Matplotlib colormap name for visualizing the attention maps. Defaults to "Greens".
The function creates a figure with three subplots:
1. The original input image
2. The raw attention map with numerical values annotated
3. The attention map overlaid on the original image with a colorbar
Returns:
None. Displays the visualization using matplotlib.
"""
# Convert the image to a NumPy array
image_np = np.array(image)
# Select the map corresponding to the token
visual_map = original_maps[token_index]
# Convert visual_map to NumPy array if it's a tensor
if isinstance(visual_map, torch.Tensor):
visual_map = visual_map.cpu().to(dtype=torch.float32).numpy()
elif not isinstance(visual_map, np.ndarray):
visual_map = np.array(visual_map)
# Convert map to a PIL image
visual_map_pil = Image.fromarray(visual_map)
# Resize using NEAREST to keep "big pixels"
visual_map_pil = visual_map_pil.resize(
(image_np.shape[1], image_np.shape[0]), # (width, height)
resample=Image.NEAREST,
)
# Convert back to NumPy
resized_map = np.array(visual_map_pil)
# Create a figure with subplots
fig, axes = plt.subplots(1, 3, figsize=(15, 2))
# Display the raw image
axes[0].imshow(image_np)
axes[0].set_title("Raw Image")
axes[0].axis("off")
# Display the raw map with annotations
im = axes[1].imshow(visual_map, cmap=cmap)
axes[1].set_title("Raw Map")
axes[1].axis("off")
if show_text:
# Annotate the heatmap
for i in range(visual_map.shape[0]):
for j in range(visual_map.shape[1]):
text = axes[1].text(
j,
i,
f"{visual_map[i, j]:.2f}",
ha="center",
va="center",
color="w" if visual_map[i, j] > visual_map.max(
) / 2 else "black",
)
# Display the overlay plot
axes[2].imshow(image_np, alpha=1)
axes[2].imshow(resized_map, cmap=cmap, alpha=0.6)
axes[2].set_title("Overlay: Image + Map")
axes[2].axis("off")
# Add a colorbar for the overlay with matching values to the raw map
cbar = fig.colorbar(
plt.cm.ScalarMappable(
cmap=cmap, norm=plt.Normalize(
vmin=visual_map.min(), vmax=visual_map.max())
),
ax=axes[2],
shrink=0.8,
orientation="vertical",
)
cbar.set_label("Map Intensity")
# Add a title with the token name
plt.suptitle(f"Token: {token_list[token_index]}")
# Adjust layout and show
plt.tight_layout()
plt.show()
def create_single_patch_image(
n_patches_x,
n_patches_y,
patch_size,
main_color,
special_color,
special_patch,
special_patch_width=2,
):
"""
Creates an image composed of colored patches, with one special patch highlighted.
The image is divided into a grid of n_patches_x by n_patches_y patches, each of size
patch_size x patch_size pixels. All patches are filled with the main_color, except
for the special_patch, which is filled with special_color. The special patch can
also have a width of more than one patch.
Args:
n_patches_x (int): Number of patches horizontally.
n_patches_y (int): Number of patches vertically.
patch_size (int): The size (in pixels) of each square patch.
main_color (list): The [R, G, B] color for most patches.
special_color (list): The [R, G, B] color for the special patch.
special_patch (tuple): The (row, col) position of the top-left corner of the special patch (0-indexed).
special_patch_width (int, optional): The width of the special patch in number of patches. Defaults to 2.
Returns:
PIL Image: The generated image.
"""
# Create a 3D NumPy array for the image
img_height = n_patches_y * patch_size
img_width = n_patches_x * patch_size
image_data = np.zeros((img_height, img_width, 3), dtype=np.uint8)
# Fill the entire image with the main color
image_data[:, :] = main_color
# Assign the special color to the special patch
special_row, special_col = special_patch
image_data[
special_row * patch_size: (special_row + special_patch_width) * patch_size,
special_col * patch_size: (special_col + special_patch_width) * patch_size,
] = special_color
return Image.fromarray(image_data)
def extract_patch_mask(image, patch_size, special_color=[0, 0, 0]):
"""
Extract a binary mask indicating the location of the special patch.
Args:
image (PIL.Image.Image): The input image.
patch_size (int): The size of each square patch in pixels.
special_color (list[int]): The RGB color of the special patch.
Returns:
np.ndarray: A binary mask of shape (n_patches_y, n_patches_x) indicating
the special patch location (1 for special patch, 0 otherwise).
"""
# Convert the image to a NumPy array
image_np = np.array(image)
# Get image dimensions
img_height, img_width, _ = image_np.shape
# Compute the number of patches
n_patches_y = img_height // patch_size
n_patches_x = img_width // patch_size
# Initialize the patch mask
patch_mask = np.zeros((n_patches_y, n_patches_x), dtype=np.int32)
# Iterate over all patches to locate the special patch
for row in range(n_patches_y):
for col in range(n_patches_x):
# Extract the patch
patch = image_np[
row * patch_size: (row + 1) * patch_size,
col * patch_size: (col + 1) * patch_size,
]
# Check if the patch matches the special color
if np.allclose(patch.mean(axis=(0, 1)), special_color, atol=1e-6):
patch_mask[row, col] = 1 # Mark this patch as special
return patch_mask
def evaluate_map_quality(similarity_map, patch_mask):
"""
Evaluate the quality of a similarity map with respect to a binary patch mask.
Args:
similarity_map (torch.Tensor): The similarity map (height, width).
patch_mask (np.ndarray): The binary mask for the patch (1 for black patch, 0 elsewhere).
Returns:
dict: Metrics including correlation, peak accuracy, and overlap score.
"""
# Ensure similarity_map is in float32 and on the CPU
similarity_map = similarity_map.to(dtype=torch.float32).cpu().numpy()
# Flatten the map and mask for easier computation
sim_map_flat = similarity_map.flatten()
patch_mask_flat = patch_mask.flatten()
# Ensure the shapes are compatible
if sim_map_flat.shape != patch_mask_flat.shape:
raise ValueError(
f"Shape mismatch: similarity_map has {sim_map_flat.shape} elements, "
f"but patch_mask has {patch_mask_flat.shape} elements."
)
# (A) Correlation
correlation = np.corrcoef(
sim_map_flat, patch_mask_flat.astype(np.float32))[0, 1]
# (B) Peak Signal Location
max_location = np.unravel_index(
np.argmax(similarity_map), similarity_map.shape)
expected_location = np.unravel_index(
np.argmax(patch_mask), patch_mask.shape)
peak_accuracy = 1 if max_location == expected_location else 0
# (C) Normalized Map Overlap
black_patch_score = similarity_map[patch_mask == 1].mean()
background_score = similarity_map[patch_mask == 0].mean()
overlap_score = black_patch_score / (
background_score + 1e-8
) # Avoid division by zero
# Return all metrics
return {
"correlation": correlation,
"peak_accuracy": peak_accuracy,
"overlap_score": overlap_score,
}
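# Interpreting these metrics: correlation approaches 1 when the map rises exactly where
# the mask is set; peak_accuracy is 1 only when the map's argmax coincides with the
# mask's argmax; overlap_score > 1 means the masked patch is, on average, brighter
# than the background.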
def evaluate_image_maps(similarity_map, real_image):
"""
Evaluates the quality of similarity maps by comparing them to a real image.
Args:
similarity_map (torch.Tensor): The similarity map to evaluate.
real_image (PIL.Image.Image): The corresponding real image.
Returns:
dict: A dictionary containing the calculated metrics: accuracy, score, and rank.
"""
# Convert the real image to a binary array (1 - normalized grayscale)
image_array = 1 - np.array(real_image.convert("L"),
dtype=np.float32) / 255.0
# Ensure similarity_map is float32 and on the CPU before using numpy operations
similarity_map_cpu = similarity_map.to(dtype=torch.float32).cpu().numpy()
# Create a mask for the maximum values in the similarity map
acc_visual_map = np.where(
similarity_map_cpu == similarity_map_cpu.max(), similarity_map_cpu, 0
)
# Check if scaling is necessary
if image_array.shape != similarity_map_cpu.shape:
scale_factor = image_array.shape[0] // similarity_map_cpu.shape[0]
scaled_visual_map = np.kron(
np.abs(similarity_map_cpu), np.ones((scale_factor, scale_factor))
)
rank_map = np.kron(
np.abs(similarity_map_cpu), np.ones((scale_factor, scale_factor))
)
acc_visual_map = np.kron(
np.abs(acc_visual_map), np.ones((scale_factor, scale_factor))
)
else:
scaled_visual_map = similarity_map_cpu
rank_map = similarity_map_cpu # Add this to avoid missing variable
# Calculate accuracy and score
accuracy = np.any(image_array * acc_visual_map)
score = np.sum(image_array * scaled_visual_map) / (
np.sum(image_array) + 1e-8
) # Avoid division by zero
# Calculate rank
bin_image = (image_array != 0).astype(int)
rank_value = np.sum(bin_image * rank_map) / (np.sum(bin_image) + 1e-8)  # Avoid division by zero
sorted_values = sorted(np.abs(similarity_map_cpu.ravel()))[::-1]
rank = np.where(np.isclose(sorted_values, rank_value))[0][0]
return {
"accuracy": accuracy,
"score": score,
"rank": rank,
}
def create_single_patch_image_with_text(
n_patches_x,
n_patches_y,
patch_size,
main_color,
special_color,
special_patch,
text="Hello",
text_color=(255, 255, 255),
special_patch_width=2,
font_size=16,
# Added font_path parameter with default value
font_path="./fonts/Roboto-Regular.ttf",
):
"""
Creates an image composed of colored patches, but places a single word (or text)
inside the "special" patch area.
"""
# Create a 3D NumPy array for the image
img_height = n_patches_y * patch_size
img_width = n_patches_x * patch_size
image_data = np.zeros((img_height, img_width, 3), dtype=np.uint8)
# Fill the entire image with the main color
image_data[:, :] = main_color
# Assign the special color to the special patch area
special_row, special_col = special_patch
image_data[
special_row * patch_size: (special_row + special_patch_width) * patch_size,
special_col * patch_size: (special_col + special_patch_width) * patch_size,
] = special_color
# Convert to a Pillow Image so we can draw on it
img = Image.fromarray(image_data)
draw = ImageDraw.Draw(img)
# Load font with specified size
try:
font = ImageFont.truetype(font_path, font_size)
except IOError:
print(f"Error loading font from {font_path}. Using default font.")
font = ImageFont.load_default()
# Calculate the center of the special patch in pixel coordinates
patch_center_x = special_col * patch_size + \
(special_patch_width * patch_size) // 2
patch_center_y = special_row * patch_size + \
(special_patch_width * patch_size) // 2
# Calculate text bounding box to center the text
text_bbox = draw.textbbox((0, 0), text, font=font)
text_width = text_bbox[2] - text_bbox[0]
text_height = text_bbox[3] - text_bbox[1]
text_x = patch_center_x - text_width // 2
text_y = patch_center_y - text_height // 2
# Place text in the center of the special patch
draw.text((text_x, text_y), text, fill=text_color, font=font)
return img
def visualize_results_grid(results_df):
columns = [results_df.iloc[:, i] for i in range(len(results_df.columns))]
columns = [
(
pd.to_numeric(col, errors="coerce")
if not pd.api.types.is_numeric_dtype(col)
else col
)
for col in columns
]
# Deduce the grid shape from the number of results rows
grid_size = int(np.sqrt(len(results_df)))
# Reshape columns into matrices
matrices = [col.to_numpy().reshape(grid_size, grid_size)
for col in columns]
# Visualization setup
fig, axes = plt.subplots(1, len(results_df.columns), figsize=(12, 2))
titles = [
(
f"{results_df.columns[i]} (Categorical/Binary)"
if i == 0
else f"{results_df.columns[i]} (Continuous)"
)
for i in range(len(results_df.columns))
]
# Added colormap for the fourth plot
cmaps = ["coolwarm"] * len(results_df.columns)
# Plot each matrix
for i, (matrix, ax, title, cmap) in enumerate(zip(matrices, axes, titles, cmaps)):
im = ax.imshow(matrix, cmap=cmap, interpolation="none")
ax.set_title(title)
ax.set_xticks(range(grid_size))
ax.set_yticks(range(grid_size))
fig.colorbar(im, ax=ax)
# Display the plot
plt.tight_layout()
plt.show()
def run_expe_word_square(
word_to_write,
token,
n_patches_x,
n_patches_y,
patch_size,
model,
processor,
device,
use_qwen,
main_color=[255, 255, 255],
special_color=(0, 0, 0),
):
all_images_text = [
create_single_patch_image_with_text(
n_patches_x=n_patches_x,
n_patches_y=n_patches_y,
patch_size=patch_size,
main_color=main_color,
special_color=main_color,
special_patch=(row, col),
text=word_to_write,
text_color=(0, 0, 0), # text_color,
font_size=9,
)
for row in range(0, n_patches_y, 2)
for col in range(0, n_patches_x, 2)
]
all_maps = []
for image in all_images_text:
batch_images = processor.process_images([image]).to(device)
batch_queries = processor.process_queries([token]).to(device)
original_maps, original_image_embeddings, original_query_embeddings = (
get_maps_and_embeds(
batch_images, batch_queries, model, processor, image, use_qwen=use_qwen
)
)
original_maps = original_maps.to(dtype=torch.float32).cpu().numpy()
all_maps.append(original_maps)
input_ids = batch_queries["input_ids"][0] # shape: (num_subtokens,)
token_list = [processor.tokenizer.decode(
[token_id]) for token_id in input_ids]
# print(token_list)
indexes = [i for i, x in enumerate(
token_list) if "<" not in x and ">" not in x][2:]
# print(indexes)
# print(np.array(token_list)[[indexes]])
results_df = pd.DataFrame(columns=["accuracy", "score", "rank"])
for i, (this_map, image) in enumerate(zip(all_maps, all_images_text)):
visual_map = this_map[indexes[0]]
metrics = evaluate_image_maps(visual_map, image)
results_df.loc[i] = metrics.values()
return results_df
|
# Ensure similarity_map is float32 and on the CPU before using numpy operations
similarity_map_cpu = similarity_map.to(dtype=torch.float32).cpu().numpy()
| add a check to avoid this operation if it is already a numpy format | import ast
import inspect
import pytest
def test_similarity_map_cpu_guarded(implementation):
"""
Ensure that within `evaluate_image_maps`, the line with
`similarity_map.to(dtype=torch.float32).cpu().numpy()` is preceded by
an `if` statement that includes 'np' or 'numpy'.
"""
impl_name, module = implementation
module_code = inspect.getsource(module)
lines = module_code.split('\n')
# Strip comments and blank lines
cleaned_lines = []
for line in lines:
stripped = line.strip()
if not stripped or stripped.startswith('#'):
continue
# Remove inline comments
line_no_comment = line.split('#')[0].strip()
cleaned_lines.append(line_no_comment)
# Flag to track whether we're inside the evaluate_image_maps function
inside_target_function = False
function_lines = []
for line in cleaned_lines:
if line.startswith("def evaluate_image_maps("):
inside_target_function = True
continue
# Stop if we’re out of the function by checking indentation
if inside_target_function:
# We know we're out of the target function because the original code is succeeded by a new method
if line.startswith("def ") or line.startswith("class "):
inside_target_function = False
break
function_lines.append(line)
if not function_lines:
pytest.fail("Function evaluate_image_maps not found or is empty")
target_expr = "similarity_map.to(dtype=torch.float32).cpu().numpy()"
for idx, line in enumerate(function_lines):
if target_expr in line:
if idx == 0:
pytest.fail("Expected 'if' statement before similarity_map conversion, got empty line.")
prev_line = function_lines[idx - 1].strip()
assert prev_line.startswith("if"), \
f"Expected 'if' statement before similarity_map conversion, got: {prev_line}"
assert "np" in prev_line or "numpy" in prev_line, \
f"'if' statement before similarity_map conversion does not mention numpy: {prev_line}"
return
pytest.fail(f"Could not find line with: {target_expr}")
| numpy
torch
pytest
pytest-mock
pillow
matplotlib
seaborn
pandas
scikit-learn
colpali-engine | import pytest
import os
import sys
import json
from typing import Dict, List, Optional, Any
# Import from local test_utils.py in the same directory
from test_utils import TestUtils, TestResultsManager
# Load all implementations in the current sandbox
implementations = TestUtils.load_all_implementations()
test_results = TestResultsManager()
@pytest.fixture(scope="session")
def sandbox_dir():
"""Fixture to provide the sandbox directory path."""
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope="session")
def sandbox_name():
"""Fixture to provide the sandbox name."""
return os.path.basename(os.path.dirname(os.path.abspath(__file__)))
@pytest.fixture(scope="session")
def all_implementations():
"""Fixture to provide all implementations as a dictionary."""
return implementations
@pytest.fixture(params=list(implementations.items()))
def implementation(request):
"""Fixture to provide each implementation to tests one at a time."""
return request.param
@pytest.fixture(scope="session")
def results_manager():
"""Fixture to provide access to the test results manager."""
return test_results
# Hook for collecting test results
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Pytest hook to collect test results."""
# Execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# We're only interested in the call outcome
if rep.when == "call":
if hasattr(item, "callspec") and "implementation" in item.callspec.params:
# Get implementation name and module
impl_name, _ = item.callspec.params["implementation"]
# Get test name
test_name = item.nodeid.split("::")[-1]
# Record result
if rep.passed:
test_results.record_result(impl_name, test_name, True)
elif rep.failed:
error_msg = str(rep.longrepr) if rep.longrepr else "Test failed"
test_results.record_result(impl_name, test_name, False, error_msg)
elif rep.skipped:
skip_reason = rep.longrepr[2] if rep.longrepr else "Test skipped"
test_results.record_skip(impl_name, test_name, skip_reason)
# Hook to save results at the end of testing
@pytest.hookimpl(trylast=True)
def pytest_sessionfinish(session, exitstatus):
"""Save test results at the end of the test session."""
test_results.save_results() | import os
import sys
import glob
import re
import importlib.util
import traceback
import types
from typing import Dict, List, Optional, Any, Tuple
class TestUtils:
@staticmethod
def discover_implementation_files(directory: str = None) -> List[str]:
"""Find all implementation files in the current sandbox directory."""
if directory is None:
directory = os.path.dirname(os.path.abspath(__file__))
patterns = [
r'modified_code\d+\.py',
r'new_code\d+\.py',
# r'original_code\.py',
r'implementation\d*\.py'
]
pattern = re.compile('|'.join(f'({p})' for p in patterns))
implementations = []
for file_path in glob.glob(os.path.join(directory, '*.py')):
if pattern.search(os.path.basename(file_path)):
implementations.append(file_path)
# Sort files numerically
def sort_key(path):
filename = os.path.basename(path)
match = re.search(r'(\d+)', filename)
return int(match.group(1)) if match else 0
return sorted(implementations, key=sort_key)
@staticmethod
def create_mock_module(file_path: str, module_name: str, error_info: str) -> types.ModuleType:
"""Create a mock module that contains error information but can still be tested."""
# Create a new module object
mock_module = types.ModuleType(module_name)
# Add basic attributes
mock_module.__file__ = file_path
mock_module.__name__ = module_name
mock_module.__display_name__ = module_name
mock_module.__error__ = error_info
# Add a dummy function that can be detected by test functions
def dummy_function(*args, **kwargs):
return f"Error in module: {error_info}"
setattr(mock_module, "implementation_error", dummy_function)
return mock_module
@staticmethod
def load_module(file_path: str, module_name: Optional[str] = None) -> Any:
"""
Safely load a module from a file path with proper error handling.
If the module has errors, return a mock module that can still be tested.
"""
if module_name is None:
module_name = os.path.basename(file_path).replace('.py', '')
# Create a unique module name to avoid conflicts
sandbox_id = os.path.basename(os.path.dirname(file_path))
unique_module_name = f"{sandbox_id}_{module_name}"
try:
# First, try to read the file to check for syntax errors
with open(file_path, 'r') as f:
source_code = f.read()
# Check for syntax errors by compiling the code
try:
compiled = compile(source_code, file_path, 'exec')
except SyntaxError as e:
error_msg = f"Syntax error: {str(e)}"
print(f"Syntax error in {file_path}: {e}")
print(f" Line {e.lineno}, column {e.offset}: {e.text}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
# Create the module spec
spec = importlib.util.spec_from_file_location(unique_module_name, file_path)
if spec is None or spec.loader is None:
error_msg = f"Could not create spec for {file_path}"
print(f"Error: {error_msg}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
# Create the module object
module = importlib.util.module_from_spec(spec)
sys.modules[unique_module_name] = module
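            # Register the module in sys.modules before executing it so that code inside
            # the module that looks itself up by name (for example dataclasses or pickle)
            # can find it while exec_module runs.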
# Special handling for execution errors
try:
# Execute the module code in a safe way
spec.loader.exec_module(module)
# Store the original name for reference
module.__display_name__ = module_name
return module
except Exception as e:
error_msg = f"Runtime error: {str(e)}"
traceback_str = traceback.format_exc()
print(f"Error executing module {file_path}: {e}")
print(traceback_str)
# Create a partial module that contains what we loaded before the error
mock_module = TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
# Copy any attributes that might have been defined before the error
for attr_name in dir(module):
if not attr_name.startswith('__'):
try:
setattr(mock_module, attr_name, getattr(module, attr_name))
except Exception:
pass # Skip attributes that can't be copied
return mock_module
except FileNotFoundError as e:
error_msg = f"File not found: {str(e)}"
print(f"Error: {error_msg}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
except Exception as e:
error_msg = f"Unexpected error: {str(e)}"
print(f"Error loading module {file_path}: {e}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
@classmethod
def load_all_implementations(cls, directory: str = None) -> Dict[str, Any]:
"""Load all implementation files in the directory, including those with errors."""
if directory is None:
directory = os.path.dirname(os.path.abspath(__file__))
implementations = {}
implementation_files = cls.discover_implementation_files(directory)
if not implementation_files:
print("WARNING: No implementation files found. Check your file naming patterns.")
for file_path in implementation_files:
module_name = os.path.basename(file_path).replace('.py', '')
module = cls.load_module(file_path, module_name)
# Always add the module, even if it has errors
implementations[module_name] = module
if hasattr(module, '__error__'):
print(f"Loaded with errors: {module_name} - {module.__error__}")
else:
print(f"Successfully loaded: {module_name}")
return implementations
class TestResultsManager:
def __init__(self):
self.results = {}
self.sandbox_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
def record_result(self, impl_name: str, test_name: str, passed: bool,
error_msg: Optional[str] = None) -> None:
"""Record a test result for an implementation."""
if impl_name not in self.results:
self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []}
if passed:
self.results[impl_name]["passed"] += 1
else:
self.results[impl_name]["failed"] += 1
if error_msg:
self.results[impl_name]["errors"].append({
"test": test_name,
"error": error_msg
})
def record_skip(self, impl_name: str, test_name: str,
reason: Optional[str] = None) -> None:
"""Record a skipped test for an implementation."""
if impl_name not in self.results:
self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []}
self.results[impl_name]["skipped"] += 1
if reason:
self.results[impl_name]["errors"].append({
"test": test_name,
"error": f"SKIPPED: {reason}"
})
def get_winner(self) -> Tuple[Optional[int], Dict]:
"""Determine the winner based on test results."""
winner = None
max_passed = -1
for impl_name, results in self.results.items():
if impl_name == "original_code":
continue # Skip original code when determining winner
if results["passed"] > max_passed:
max_passed = results["passed"]
winner = impl_name
# Break ties by looking at failure count
elif results["passed"] == max_passed and winner is not None:
if results["failed"] < self.results[winner]["failed"]:
winner = impl_name
# Convert winner to numeric index if possible
winner_index = -1
if winner and re.match(r'modified_code\d+', winner):
try:
winner_index = int(re.search(r'(\d+)', winner).group(1))
except (AttributeError, ValueError):
pass
return winner_index, self.results
    def save_results(self, filename: str = "test_results.json") -> Dict:
"""Save test results to a JSON file."""
import json
winner_index, results = self.get_winner()
# Check if all tests were skipped
all_skipped = all(
stats["skipped"] == stats["passed"] + stats["failed"] + stats["skipped"]
for impl_name, stats in results.items()
if impl_name != "original_code"
)
output = {
"winner": winner_index,
"all_skipped": all_skipped,
"results": {
name: {
"passed": stats["passed"],
"failed": stats["failed"],
"skipped": stats["skipped"],
"total": stats["passed"] + stats["failed"] + stats["skipped"]
}
for name, stats in results.items()
if not name.startswith("_") # Skip internal items
}
}
with open(filename, "w") as f:
json.dump(output, f, indent=2)
print(f"Test results saved to {filename}")
return output | test | null | null | null | null | null |
103 | python | from ast import Add
from asyncio import wait
from curses import COLOR_BLUE, COLOR_RED
from re import A
from shutil import move
from glm import degrees
from manim import *
from numpy import size, square
class Project(Scene):
def construct(self):
text = Tex("Double Angle")
self.play( Write(text))
self.wait(5)
transform_text = Tex("What is Double Angle?")
transform_text.to_corner(UP)
box = SurroundingRectangle(transform_text)
box.set_color(WHITE)
box.set_stroke(width=1.5)
self.play(
Transform(text, transform_text)
)
self.wait(0.5)
self.play(Create(box))
explanation = Paragraph("A double angle is an angle measurement", "that has been multiplied by 2 or added to itself.", line_spacing=0.5, font_size=32)
explanation.move_to(ORIGIN)
self.play(
Write(explanation)
)
self.wait(3)
self.play(
Transform(explanation, explanation.copy().shift(UP))
)
trig_cos2 = MathTex(
r"\cos2x = \cos^2x - \sin^2x",
substrings_to_isolate=["cos2x"]
)
trig_cos2.set_color_by_tex("cos2x", BLUE)
trig_cos2.move_to(DOWN)
transform_formula = Tex("Double Angle Formula")
transform_formula.to_corner(UP)
self.wait(1)
self.play(
Write(trig_cos2)
)
self.wait(2)
self.play(
FadeOut(trig_cos2, explanation)
)
self.wait(1)
axes = Axes(
x_range=[-2, 2, 2],
y_range=[-2, 2, 2],
x_length=4,
y_length=4,
)
self.add(axes)
        # Create the unit circle
circle = Circle(radius=2, color=BLUE)
self.add(circle)
        # Origin
dot = Dot(ORIGIN, color=RED)
self.add(dot)
        # Line segment representing the angle
line = Line(ORIGIN, RIGHT * 2)
self.add(line)
        # Angle label
# Create an Arc for the angle
angle = Arc(
radius=2,
start_angle=0, # Start at the positive x-axis
angle=line.get_angle(), # Use line's angle
arc_center=ORIGIN,
color=GREEN
)
angle_label = MathTex(r"\theta = 0^{\circ}").next_to(angle, RIGHT) # Changed Tex to MathTex and added \\
self.add(angle, angle_label)
intersection_dot = Dot(color=YELLOW)
angle_tracker = ValueTracker(0)
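        # The ValueTracker drives the diagram: the updaters below rebuild the line, the
        # arc, its label and the intersection dot from the tracker's current value on
        # every frame, so animating the tracker animates all of them together.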
def update_line(mobject):
mobject.become(Line(ORIGIN, RIGHT * 2).rotate(angle_tracker.get_value(), about_point=ORIGIN))
def update_angle(mobject):
mobject.become(Arc(
radius=2,
start_angle=0,
angle=angle_tracker.get_value(),
arc_center=ORIGIN,
color=GREEN
))
line.add_updater(update_line)
angle.add_updater(update_angle)
# Update the angle label
def update_label(mobject):
angle_in_degrees = np.degrees(angle_tracker.get_value())
mobject.become(MathTex(rf"\\theta = {angle_in_degrees:.0f}^{{\circ}}")) # Added double brackets
mobject.next_to(angle, RIGHT)
angle_label.add_updater(update_label)
def update_intersection_dot(mobject):
angle = angle_tracker.get_value()
x = 2 * np.cos(angle) # x-coordinate on the circle
y = 2 * np.sin(angle) # y-coordinate on the circle
mobject.move_to([x, y, 0])
intersection_dot.add_updater(update_intersection_dot)
self.add(intersection_dot)
# Animate the angle
self.play(
angle_tracker.animate.set_value(PI / 6),
run_time=2
)
self.wait(3)
line.clear_updaters()
intersection_dot.clear_updaters()
angle.clear_updaters()
angle_label.clear_updaters()
# Change their color to indicate they are fixed
fixed_line = line.copy().set_color(ORANGE)
fixed_dot = intersection_dot.copy().set_color(ORANGE)
fixed_angle = angle.copy().set_color(ORANGE)
self.add(fixed_line, fixed_dot, fixed_angle)
# Prepare a new line for the next animation
new_line = Line(ORIGIN, RIGHT * 2, color=GREEN)
new_intersection_dot = Dot(color=YELLOW)
new_angle = Arc(
radius=0.5,
start_angle=PI / 6, # Start from 30 degrees
angle=0,
arc_center=ORIGIN,
color=GREEN
)
new_label = MathTex(rf"\theta = 30^\circ").next_to(new_angle, RIGHT).set_color(ORANGE)
# Updaters for the new objects
new_line.add_updater(lambda m: m.become(
Line(ORIGIN, RIGHT * 2).rotate(angle_tracker.get_value(), about_point=ORIGIN)
))
new_intersection_dot.add_updater(lambda m: m.move_to([
2 * np.cos(angle_tracker.get_value()),
2 * np.sin(angle_tracker.get_value()),
0
]))
new_angle.add_updater(lambda m: m.become(
Arc(
radius=0.5,
start_angle=0,
angle=angle_tracker.get_value(),
arc_center=ORIGIN,
color=GREEN
)
))
new_label.add_updater(lambda m: m.become(
MathTex(rf"\theta = {np.degrees(angle_tracker.get_value()):.0f}^\circ").next_to(new_angle, LEFT)
))
# Add the new objects
self.add(new_line, new_intersection_dot, new_angle, new_label)
# Animate from 30 degrees to 60 degrees
self.play(
angle_tracker.animate.set_value(PI / 3), # 60 degrees
run_time=2
)
self.wait(1)
self.wait(10)
self.play(
            FadeOut(circle, dot, line, angle, angle_label, axes, intersection_dot, new_line, new_angle, new_label, new_intersection_dot, fixed_line, fixed_angle, fixed_dot, angle_tracker)
)
self.play(
FadeOut(transform_text, explanation),
            Transform(trig_cos2, trig_cos2.copy().shift(3 * UP)),
Transform(text, transform_formula),
)
self.wait(2)
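        # On-screen derivation: cos 2x = cos(x + x), expanded with the angle-sum identity
        # cos(A + B) = cos A cos B - sin A sin B and A = B = x, gives cos^2 x - sin^2 x,
        # and sin^2 x + cos^2 x = 1 turns that into 1 - 2 sin^2 x.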
cos_xx = MathTex(
r"\cos2x = \cos(A+B)"
)
cos_xx.move_to(ORIGIN + UP)
cos_ab = MathTex (
r"\cos(A+B) =(\cos A \cdot \cos B) - (\sin A \cdot \sin B)"
)
cos_ab.move_to(ORIGIN)
let_AB = Tex("Let A = B")
let_AB.move_to(ORIGIN + DOWN)
ab_simple = MathTex(
r"\cos(A+A) = \cos^2A - \sin^2A"
)
        ab_simple.move_to(ORIGIN + 2 * DOWN)
ab_finalize = MathTex(
r"= 1-2\sin^2x"
)
        ab_finalize.move_to(ORIGIN + 3 * DOWN + RIGHT)
self.play(
Write(cos_xx)
)
self.wait(0.5)
self.play(
Write(cos_ab),
)
self.wait(0.5)
self.play(
Write(let_AB)
)
self.wait(0.5)
self.play(
Write(ab_simple)
)
self.wait(0.5)
self.play(
Write(ab_finalize)
)
arrow = Arrow(2*UP, 2*DOWN)
VGroup(arrow).set_x(0).arrange(buff=2)
        arrow.move_to(ORIGIN + 6 * RIGHT)
self.play(Write(arrow))
self.wait(15)
self.play(
FadeOut(text, transform_text, trig_cos2, cos_xx, cos_ab, let_AB, ab_simple, ab_finalize, arrow, box, transform_formula)
)
self.wait(1)
#moving to the explanation of example
#What is proof in Math?
proof = Tex("What is proof?", font_size = 48)
self.play(Write(proof))
self.wait(3)
self.play(
            Transform(proof, proof.copy().shift(2 * UP))
)
proof_exp = Paragraph("In trigonometry, a proof is a way to show that ", "two trigonometric expressions are equivalent, regardless of the angle. ","This process is called validating or proving trigonometric identities.", font_size=28)
self.play(Write(proof_exp))
self.wait(8)
self.play(
FadeOut(proof, proof_exp)
)
#starting with Sin and Cos graph identity
ax = Axes()
sine = ax.plot(np.sin, color = RED)
cosine = ax.plot(np.cos, color = BLUE)
self.play(
FadeIn(ax, sine, cosine)
)
red_square = Square(fill_opacity = 1, side_length=0.5, fill_color = RED_C).to_corner(UL)
blue_square = Square(fill_opacity=1, side_length=0.5, fill_color=BLUE_C).to_corner(UL - DOWN)
self.play(DrawBorderThenFill(red_square))
self.play(DrawBorderThenFill(blue_square))
text_sin = MathTex(r"\sin(x)")
text_cos = MathTex(r"\cos(x)")
text_sin.next_to(Square(fill_opacity=1, side_length=0.5, fill_color=RED_C).to_corner(UL))
text_cos.next_to(Square(fill_opacity=1, side_length=0.5, fill_color=BLUE_C).to_corner(UL - DOWN))
        # Correct usage of next_to: multiply RIGHT by a scalar
self.play(Write(text_sin))
self.wait(0.5)
self.play(Write(text_cos))
self.wait(0.5)
self.wait(8)
self.play(FadeOut(sine, cosine, text_sin, text_cos, ax, red_square, blue_square))
self.wait(2)
prob_cos = Tex(r"Prove that $\cos\left(x - \frac{\pi}{2}\right)$ is the same as $\sin x$")
self.play(Write(prob_cos))
self.wait(2)
self.play(
Transform(prob_cos, prob_cos.copy().to_corner(UP))
)
self.wait(10)
step1 = Tex(r"1. Make balance equation $\cos\left(x - \frac{\pi}{2}\right) = \sin x$")
step2 = Tex("2. Identify which side is easier to change form, or simplify.")
step3 = Tex("3. Formulate and make it equal to the other side.")
steps = VGroup(step1, step2, step3).arrange(DOWN, aligned_edge=LEFT)
steps.move_to(ORIGIN)
steps.next_to(prob_cos, DOWN, buff=0.5)
self.play(
Write(steps)
)
self.wait(3)
self.play(Circumscribe(step1, Rectangle, time_width=4))
self.play(
FadeOut(step2, step3)
)
step1_exp = MathTex(r"\cos\left(x-\frac{\pi}{2}\right) = \sin x")
step1_exp.move_to(ORIGIN)
self.play(
Write(step1_exp)
)
self.wait(6)
self.play(
FadeOut(step1, step1_exp),
)
self.wait(1)
self.play(
FadeIn(steps),
)
self.wait(3)
self.play(
Circumscribe(step2, Rectangle, time_width=4)
)
self.play(
FadeOut(step1, step3),
Transform(step2, step2.copy().shift(UP))
)
self.wait(3)
step2_exp = MathTex(r"\cos\left(x-\frac{\pi}{2}\right)", color=BLUE)
step2_exp.move_to(ORIGIN)
self.play(Write(step2_exp))
self.wait(2)
step2_exp2 = Tex("Left side is easier to change form", color=BLUE)
step2_exp2.next_to(step2_exp, DOWN)
self.play(Write(step2_exp2))
self.wait(2)
step2_exp3 = MathTex(r"\cos\left(x-\frac{\pi}{2}\right) = \cos(A-B)", color=WHITE)
step2_exp3.move_to(ORIGIN)
self.play(
Transform(step2_exp, step2_exp3),
FadeOut(step2_exp2)
)
self.wait(2)
step2_exp4 = MathTex(r"\cos(A-B) = \cos A \cos B + \sin A \sin B", color=BLUE)
step2_exp4.next_to(step2_exp3, DOWN)
self.play(Write(step2_exp4))
self.wait(2)
step2_exp5 = MathTex(r"A = x, B = \frac{\pi}{2}", color=BLUE)
step2_exp5.next_to(step2_exp4, DOWN)
self.play(Write(step2_exp5))
self.wait(2)
step2_exp6 = MathTex(r"\cos x \cos \frac{\pi}{2} + \sin x \sin \frac{\pi}{2}", color=WHITE)
step2_exp6.move_to(ORIGIN)
self.play(
FadeOut(step2_exp, step2_exp4, step2_exp5),
Write(step2_exp6)
)
self.wait(2)
step2_exp7 = MathTex(r"\cos \frac{\pi}{2} = 0, \sin \frac{\pi}{2} = 1", color=BLUE)
step2_exp7.next_to(step2_exp6, DOWN)
self.play(Write(step2_exp7))
self.wait(2)
step2_exp8 = MathTex(r"\cos x (0) + \sin x (1) = \sin x", color=WHITE)
step2_exp8.move_to(ORIGIN)
self.play(
FadeOut(step2_exp6, step2_exp7),
Write(step2_exp8)
)
self.wait(2)
self.play(FadeOut(step2_exp8, step2))
self.wait(15)
| step2_exp8 = MathTex(r"\cos x (0) + \sin x (1) = \sin x", color=WHITE)
step2_exp8.move_to(ORIGIN)
self.play(
FadeOut(step2_exp6, step2_exp7),
Write(step2_exp8)
)
self.wait(2)
        self.play(FadeOut(step2_exp8, step2)) | Move the proved sin x to the center of the screen and fade out the rest of the equation | import pytest
import re
import inspect
from typing import List
import ast
def get_source_code(impl_name, module) -> str:
"""Get the source code of the implementation module"""
try:
return inspect.getsource(module)
except Exception:
return ""
import re
from typing import List
def test_moves_sinx_equation_to_center(implementation):
"""Test if sinx (step2_exp8) is moved to the center of the screen"""
impl_name, module = implementation
code = get_source_code(impl_name, module)
# Look for .move_to(ORIGIN) or .animate.move_to(ORIGIN) applied to sinx object
moved = re.search(r'step2_exp8(\.animate)?\.move_to\s*\(\s*ORIGIN\s*\)', code)
assert moved, f"{impl_name} does not move sinx (step2_exp8) to center using move_to(ORIGIN)"
def test_fades_out_other_equations(implementation):
"""Test if other equations (e.g. step2_exp6, step2_exp7) are faded out"""
impl_name, module = implementation
code = get_source_code(impl_name, module)
# Look for FadeOut involving other step2 expressions
fadeout_other = re.search(r'FadeOut\s*\(\s*step2_exp6\s*,\s*step2_exp7\s*\)', code) or \
re.search(r'FadeOut\s*\(\s*step2_exp\d+', code)
assert fadeout_other, f"{impl_name} does not fade out other equations like step2_exp6, step2_exp7"
| pytest
pytest-mock
manim
numpy
pyglm
pydub | import pytest
import os
import sys
import json
from typing import Dict, List, Optional, Any
# Import from local test_utils.py in the same directory
from test_utils import TestUtils, TestResultsManager
# Load all implementations in the current sandbox
implementations = TestUtils.load_all_implementations()
test_results = TestResultsManager()
@pytest.fixture(scope="session")
def sandbox_dir():
"""Fixture to provide the sandbox directory path."""
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope="session")
def sandbox_name():
"""Fixture to provide the sandbox name."""
return os.path.basename(os.path.dirname(os.path.abspath(__file__)))
@pytest.fixture(scope="session")
def all_implementations():
"""Fixture to provide all implementations as a dictionary."""
return implementations
@pytest.fixture(params=list(implementations.items()))
def implementation(request):
"""Fixture to provide each implementation to tests one at a time."""
return request.param
@pytest.fixture(scope="session")
def results_manager():
"""Fixture to provide access to the test results manager."""
return test_results
# Hook for collecting test results
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Pytest hook to collect test results."""
# Execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# We're only interested in the call outcome
if rep.when == "call":
if hasattr(item, "callspec") and "implementation" in item.callspec.params:
# Get implementation name and module
impl_name, _ = item.callspec.params["implementation"]
# Get test name
test_name = item.nodeid.split("::")[-1]
# Record result
if rep.passed:
test_results.record_result(impl_name, test_name, True)
elif rep.failed:
error_msg = str(rep.longrepr) if rep.longrepr else "Test failed"
test_results.record_result(impl_name, test_name, False, error_msg)
elif rep.skipped:
skip_reason = rep.longrepr[2] if rep.longrepr else "Test skipped"
test_results.record_skip(impl_name, test_name, skip_reason)
# Hook to save results at the end of testing
@pytest.hookimpl(trylast=True)
def pytest_sessionfinish(session, exitstatus):
"""Save test results at the end of the test session."""
test_results.save_results() | import os
import sys
import glob
import re
import importlib.util
import traceback
import types
from typing import Dict, List, Optional, Any, Tuple
class TestUtils:
@staticmethod
def discover_implementation_files(directory: str = None) -> List[str]:
"""Find all implementation files in the current sandbox directory."""
if directory is None:
directory = os.path.dirname(os.path.abspath(__file__))
patterns = [
r'modified_code\d+\.py',
r'new_code\d+\.py',
# r'original_code\.py',
r'implementation\d*\.py'
]
pattern = re.compile('|'.join(f'({p})' for p in patterns))
implementations = []
for file_path in glob.glob(os.path.join(directory, '*.py')):
if pattern.search(os.path.basename(file_path)):
implementations.append(file_path)
# Sort files numerically
def sort_key(path):
filename = os.path.basename(path)
match = re.search(r'(\d+)', filename)
return int(match.group(1)) if match else 0
return sorted(implementations, key=sort_key)
@staticmethod
def create_mock_module(file_path: str, module_name: str, error_info: str) -> types.ModuleType:
"""Create a mock module that contains error information but can still be tested."""
# Create a new module object
mock_module = types.ModuleType(module_name)
# Add basic attributes
mock_module.__file__ = file_path
mock_module.__name__ = module_name
mock_module.__display_name__ = module_name
mock_module.__error__ = error_info
# Add a dummy function that can be detected by test functions
def dummy_function(*args, **kwargs):
return f"Error in module: {error_info}"
setattr(mock_module, "implementation_error", dummy_function)
return mock_module
@staticmethod
def load_module(file_path: str, module_name: Optional[str] = None) -> Any:
"""
Safely load a module from a file path with proper error handling.
If the module has errors, return a mock module that can still be tested.
"""
if module_name is None:
module_name = os.path.basename(file_path).replace('.py', '')
# Create a unique module name to avoid conflicts
sandbox_id = os.path.basename(os.path.dirname(file_path))
unique_module_name = f"{sandbox_id}_{module_name}"
try:
# First, try to read the file to check for syntax errors
with open(file_path, 'r') as f:
source_code = f.read()
# Check for syntax errors by compiling the code
try:
compiled = compile(source_code, file_path, 'exec')
except SyntaxError as e:
error_msg = f"Syntax error: {str(e)}"
print(f"Syntax error in {file_path}: {e}")
print(f" Line {e.lineno}, column {e.offset}: {e.text}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
# Create the module spec
spec = importlib.util.spec_from_file_location(unique_module_name, file_path)
if spec is None or spec.loader is None:
error_msg = f"Could not create spec for {file_path}"
print(f"Error: {error_msg}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
# Create the module object
module = importlib.util.module_from_spec(spec)
sys.modules[unique_module_name] = module
# Special handling for execution errors
try:
# Execute the module code in a safe way
spec.loader.exec_module(module)
# Store the original name for reference
module.__display_name__ = module_name
return module
except Exception as e:
error_msg = f"Runtime error: {str(e)}"
traceback_str = traceback.format_exc()
print(f"Error executing module {file_path}: {e}")
print(traceback_str)
# Create a partial module that contains what we loaded before the error
mock_module = TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
# Copy any attributes that might have been defined before the error
for attr_name in dir(module):
if not attr_name.startswith('__'):
try:
setattr(mock_module, attr_name, getattr(module, attr_name))
except Exception:
pass # Skip attributes that can't be copied
return mock_module
except FileNotFoundError as e:
error_msg = f"File not found: {str(e)}"
print(f"Error: {error_msg}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
except Exception as e:
error_msg = f"Unexpected error: {str(e)}"
print(f"Error loading module {file_path}: {e}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
@classmethod
def load_all_implementations(cls, directory: str = None) -> Dict[str, Any]:
"""Load all implementation files in the directory, including those with errors."""
if directory is None:
directory = os.path.dirname(os.path.abspath(__file__))
implementations = {}
implementation_files = cls.discover_implementation_files(directory)
if not implementation_files:
print("WARNING: No implementation files found. Check your file naming patterns.")
for file_path in implementation_files:
module_name = os.path.basename(file_path).replace('.py', '')
module = cls.load_module(file_path, module_name)
# Always add the module, even if it has errors
implementations[module_name] = module
if hasattr(module, '__error__'):
print(f"Loaded with errors: {module_name} - {module.__error__}")
else:
print(f"Successfully loaded: {module_name}")
return implementations
class TestResultsManager:
def __init__(self):
self.results = {}
self.sandbox_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
def record_result(self, impl_name: str, test_name: str, passed: bool,
error_msg: Optional[str] = None) -> None:
"""Record a test result for an implementation."""
if impl_name not in self.results:
self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []}
if passed:
self.results[impl_name]["passed"] += 1
else:
self.results[impl_name]["failed"] += 1
if error_msg:
self.results[impl_name]["errors"].append({
"test": test_name,
"error": error_msg
})
def record_skip(self, impl_name: str, test_name: str,
reason: Optional[str] = None) -> None:
"""Record a skipped test for an implementation."""
if impl_name not in self.results:
self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []}
self.results[impl_name]["skipped"] += 1
if reason:
self.results[impl_name]["errors"].append({
"test": test_name,
"error": f"SKIPPED: {reason}"
})
def get_winner(self) -> Tuple[Optional[int], Dict]:
"""Determine the winner based on test results."""
winner = None
max_passed = -1
for impl_name, results in self.results.items():
if impl_name == "original_code":
continue # Skip original code when determining winner
if results["passed"] > max_passed:
max_passed = results["passed"]
winner = impl_name
# Break ties by looking at failure count
elif results["passed"] == max_passed and winner is not None:
if results["failed"] < self.results[winner]["failed"]:
winner = impl_name
# Convert winner to numeric index if possible
winner_index = -1
if winner and re.match(r'modified_code\d+', winner):
try:
winner_index = int(re.search(r'(\d+)', winner).group(1))
except (AttributeError, ValueError):
pass
return winner_index, self.results
    def save_results(self, filename: str = "test_results.json") -> Dict:
"""Save test results to a JSON file."""
import json
winner_index, results = self.get_winner()
# Check if all tests were skipped
all_skipped = all(
stats["skipped"] == stats["passed"] + stats["failed"] + stats["skipped"]
for impl_name, stats in results.items()
if impl_name != "original_code"
)
output = {
"winner": winner_index,
"all_skipped": all_skipped,
"results": {
name: {
"passed": stats["passed"],
"failed": stats["failed"],
"skipped": stats["skipped"],
"total": stats["passed"] + stats["failed"] + stats["skipped"]
}
for name, stats in results.items()
if not name.startswith("_") # Skip internal items
}
}
with open(filename, "w") as f:
json.dump(output, f, indent=2)
print(f"Test results saved to {filename}")
return output | test | null | null | null | null | null |
104 | python | import requests # for querying the API
import xml.etree.ElementTree # for parsing the API's XML response
import datetime # for the dates along the x-axis
import pickle # for storing variables in a file
import json
# TODO: make sure set_valutes actually gets populated; right now it is not being filled
# Currency class
class valute():
"""Валюта и всё с ней связанное, через ЦБ РФ \n
Требуются библеотеки: \n
requests \n
xml.etree.ElementTree \n
datetime \n
pickle \n
json \n
"""
def __init__(self, name):
self.name = name
def correct_name(self):
"""Проверка имени валюты на наличие в множестве валют. Множество обновляется не чаще раза в день"""
info_opened_file = open(r"D:\MoexAPI_bot_aiogram3\data_files\Info.json", "r", encoding="utf-8") #открываем файл инфы, encoding чтобы не было
info = json.load(info_opened_file)
info_opened_file.close()
if datetime.datetime.now() - datetime.timedelta(days=1) > datetime.datetime.strptime(info["last_day_check"]["valute"], "%Y-%m-%d %H:%M:%S.%f"): #проверяем условие что дата перезаписи списка валют это хотя бы 1 день назад
#если отличается более чем на 1 день, то переписываем список (множество) валют:
set_valutes = set() #создаём пустое множество, в него будем заливать валюты
s = "http://www.cbr.ru/scripts/XML_daily.asp"
r = requests.get(s)
root = xml.etree.ElementTree.fromstring(r.content) #запрос всё равно выдаёт данные сайта как строку, так что без fromstring никак
for Valute in root.findall("Valute"):
CharCode = Valute.find("CharCode")
set_valutes.add(CharCode.text) #заливаем валюты в наше множество
set_valutes_file_opened = open(r"D:\MoexAPI_bot_aiogram3\data_files\set_valutes.bin", "wb") #открываем файл для бинарной записи множества тикеров в него
pickle.dump(set_valutes, set_valutes_file_opened) #закидываем созданное множество в файл. Если что, каждый раз будет перезаписываться (проверено)
set_valutes_file_opened.close() #закрываем файл
#поменяем время последнего обновления
info["last_day_check"]["valute"] = str(datetime.datetime.now())
info_opened_file = open(r"D:\MoexAPI_bot_aiogram3\data_files\Info.json", "w", encoding="utf-8")
json.dump(info, info_opened_file, indent = 3, ensure_ascii = False) #запишем новый файл
info_opened_file.close()
#теперь просто проверим есть ли валюта в списке валют
set_valutes_file_opened = open(r"D:\MoexAPI_bot_aiogram3\data_files\set_valutes.bin", "rb") #открываем файл с множеством тикеров чтобы его оттуда получить
set_valutes = pickle.load(set_valutes_file_opened) #из открытого файла выгружаем значение множества валют в переменную. Если вдруг запишется несколько множеств (такого быть не должно), то откроется только первое из них
if self.name in set_valutes: #просто проверяем есть ли валюта в множестве тикеров
return True
else:
return False
def CurrentExchangeRate(self):
        '''Current exchange rate of the currency against the ruble'''
r = requests.get("http://www.cbr.ru/scripts/XML_daily.asp") #Api ЦБ РФ
root = xml.etree.ElementTree.fromstring(r.content)
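        # Note: the CBR feed formats VunitRate with a comma as the decimal separator
        # (e.g. "75,1234"), so callers will likely need .replace(",", ".") before float().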
        for Valute in root.findall("Valute"): # search the currency containers
            for CharCode in Valute.findall("CharCode"): # search the CharCode containers
                if CharCode.text == self.name: # found the container with the required currency
return (Valute.find("VunitRate").text) | def correct_name(self):
"""Проверка имени валюты на наличие в множестве валют. Множество обновляется не чаще раза в день"""
info_opened_file = open(r"D:\MoexAPI_bot_aiogram3\data_files\Info.json", "r", encoding="utf-8") #открываем файл инфы, encoding чтобы не было
info = json.load(info_opened_file)
info_opened_file.close()
if datetime.datetime.now() - datetime.timedelta(days=1) > datetime.datetime.strptime(info["last_day_check"]["valute"], "%Y-%m-%d %H:%M:%S.%f"): #проверяем условие что дата перезаписи списка валют это хотя бы 1 день назад
#если отличается более чем на 1 день, то переписываем список (множество) валют:
set_valutes = set() #создаём пустое множество, в него будем заливать валюты
s = "http://www.cbr.ru/scripts/XML_daily.asp"
r = requests.get(s)
root = xml.etree.ElementTree.fromstring(r.content) #запрос всё равно выдаёт данные сайта как строку, так что без fromstring никак
for Valute in root.findall("Valute"):
CharCode = Valute.find("CharCode")
set_valutes.add(CharCode.text) #заливаем валюты в наше множество
set_valutes_file_opened = open(r"D:\MoexAPI_bot_aiogram3\data_files\set_valutes.bin", "wb") #открываем файл для бинарной записи множества тикеров в него
pickle.dump(set_valutes, set_valutes_file_opened) #закидываем созданное множество в файл. Если что, каждый раз будет перезаписываться (проверено)
set_valutes_file_opened.close() #закрываем файл
#поменяем время последнего обновления
info["last_day_check"]["valute"] = str(datetime.datetime.now())
info_opened_file = open(r"D:\MoexAPI_bot_aiogram3\data_files\Info.json", "w", encoding="utf-8")
json.dump(info, info_opened_file, indent = 3, ensure_ascii = False) #запишем новый файл
info_opened_file.close()
#теперь просто проверим есть ли валюта в списке валют
set_valutes_file_opened = open(r"D:\MoexAPI_bot_aiogram3\data_files\set_valutes.bin", "rb") #открываем файл с множеством тикеров чтобы его оттуда получить
set_valutes = pickle.load(set_valutes_file_opened) #из открытого файла выгружаем значение множества валют в переменную. Если вдруг запишется несколько множеств (такого быть не должно), то откроется только первое из них
if self.name in set_valutes: #просто проверяем есть ли валюта в множестве тикеров
return True
else:
            return False | rewrite the method asynchronously, importing aiofiles and keeping my comments | import asyncio
import inspect
import json
import pickle
from datetime import datetime, timedelta
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
import sys
import aiofiles
class AsyncContextManagerMock:
"""A mock for async context managers with awaitable methods like read/write"""
def __init__(self, read_data=None):
self.aenter_return = MagicMock()
self.aenter_return.read = AsyncMock(return_value=read_data)
self.aenter_return.write = AsyncMock()
self.aenter_return.close = AsyncMock()
async def __aenter__(self):
return self.aenter_return
async def __aexit__(self, *args):
pass
@pytest.fixture
def mock_files():
"""Setup mock file data for testing"""
info_data = {
"last_day_check": {
"valute": (datetime.now() - timedelta(days=2)).strftime("%Y-%m-%d %H:%M:%S.%f")
}
}
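    # The timestamp is deliberately two days old so the "older than one day" refresh
    # branch in correct_name is exercised by the tests.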
info_data_str = json.dumps(info_data)
set_valutes = {"USD", "EUR", "GBP"}
set_valutes_bytes = pickle.dumps(set_valutes)
xml_content = """
<ValCurs>
<Valute ID="R01235">
<NumCode>840</NumCode>
<CharCode>USD</CharCode>
<Nominal>1</Nominal>
<Name>Доллар США</Name>
<Value>75,1234</Value>
<VunitRate>75,1234</VunitRate>
</Valute>
<Valute ID="R01239">
<NumCode>978</NumCode>
<CharCode>EUR</CharCode>
<Nominal>1</Nominal>
<Name>Евро</Name>
<Value>85,5678</Value>
<VunitRate>85,5678</VunitRate>
</Valute>
</ValCurs>
"""
return {
"info_data_str": info_data_str,
"info_data": info_data,
"set_valutes": set_valutes,
"set_valutes_bytes": set_valutes_bytes,
"xml_content": xml_content.strip()
}
def is_any_path_match(path, patterns):
"""Check if any pattern is in the path string"""
if not isinstance(path, str):
return False
path = path.lower().replace('\\', '/').replace('//', '/')
return any(pattern.lower() in path for pattern in patterns)
def aiofiles_open_side_effect_factory(mock_files):
"""Factory to return a patched aiofiles.open function"""
def side_effect(*args, **kwargs):
path = args[0] if args else ""
if is_any_path_match(path, ["info.json"]):
return AsyncContextManagerMock(read_data=mock_files["info_data_str"])
elif is_any_path_match(path, ["set_valutes.bin"]):
return AsyncContextManagerMock(read_data=mock_files["set_valutes_bytes"])
else:
return AsyncContextManagerMock(read_data="{}")
return side_effect
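# The factory above makes aiofiles.open hand back a different mock payload depending on
# which file path the implementation opens (Info.json vs. set_valutes.bin).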
def test_correct_imports_and_async_def(implementation):
"""Ensure aiofiles is imported and correct_name is async"""
impl_name, module = implementation
source_code = inspect.getsource(module)
assert "aiofiles" in source_code, "Implementation should import aiofiles"
valute_class = getattr(module, "valute", None)
assert valute_class is not None
assert asyncio.iscoroutinefunction(valute_class.correct_name), "correct_name should be async"
@pytest.mark.asyncio
async def test_correct_name_logic_async(implementation, mock_files):
"""Test correct_name returns correct value and uses aiofiles properly"""
impl_name, module = implementation
sys.modules[module.__name__].aiofiles = aiofiles
valute_class = getattr(module, "valute")
valute_instance = valute_class("USD")
invalid_instance = valute_class("XYZ")
with patch("aiofiles.open", side_effect=aiofiles_open_side_effect_factory(mock_files)), \
patch("pickle.loads", return_value=mock_files["set_valutes"]), \
patch("requests.get") as mock_get:
mock_response = MagicMock()
mock_response.content = mock_files["xml_content"]
mock_get.return_value = mock_response
result_valid = await valute_instance.correct_name()
result_invalid = await invalid_instance.correct_name()
assert result_valid is True, "Expected True for valid currency"
assert result_invalid is False, "Expected False for invalid currency"
@pytest.mark.asyncio
async def test_uses_aiofiles_open_exclusively(implementation, mock_files):
"""Test that aiofiles.open is used instead of built-in open"""
impl_name, module = implementation
sys.modules[module.__name__].aiofiles = aiofiles
valute_class = getattr(module, "valute")
valute_instance = valute_class("USD")
with patch("aiofiles.open", side_effect=aiofiles_open_side_effect_factory(mock_files)) as mock_aio_open, \
patch("builtins.open") as mock_builtin_open, \
patch("pickle.loads", return_value=mock_files["set_valutes"]), \
patch("requests.get") as mock_get:
mock_response = MagicMock()
mock_response.content = mock_files["xml_content"]
mock_get.return_value = mock_response
await valute_instance.correct_name()
# Assert aiofiles.open is used
assert mock_aio_open.called, "aiofiles.open should be used for file I/O"
# Assert regular open is not used
assert not mock_builtin_open.called, "Built-in open() should NOT be used in async method" | aiofiles
aiohttp
pytest
pytest-asyncio
pytest-mock
requests | import pytest
import os
import sys
import json
from typing import Dict, List, Optional, Any
# Import from local test_utils.py in the same directory
from test_utils import TestUtils, TestResultsManager
# Load all implementations in the current sandbox
implementations = TestUtils.load_all_implementations()
test_results = TestResultsManager()
@pytest.fixture(scope="session")
def sandbox_dir():
"""Fixture to provide the sandbox directory path."""
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope="session")
def sandbox_name():
"""Fixture to provide the sandbox name."""
return os.path.basename(os.path.dirname(os.path.abspath(__file__)))
@pytest.fixture(scope="session")
def all_implementations():
"""Fixture to provide all implementations as a dictionary."""
return implementations
@pytest.fixture(params=list(implementations.items()))
def implementation(request):
"""Fixture to provide each implementation to tests one at a time."""
return request.param
@pytest.fixture(scope="session")
def results_manager():
"""Fixture to provide access to the test results manager."""
return test_results
# Hook for collecting test results
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Pytest hook to collect test results."""
# Execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# We're only interested in the call outcome
if rep.when == "call":
if hasattr(item, "callspec") and "implementation" in item.callspec.params:
# Get implementation name and module
impl_name, _ = item.callspec.params["implementation"]
# Get test name
test_name = item.nodeid.split("::")[-1]
# Record result
if rep.passed:
test_results.record_result(impl_name, test_name, True)
elif rep.failed:
error_msg = str(rep.longrepr) if rep.longrepr else "Test failed"
test_results.record_result(impl_name, test_name, False, error_msg)
elif rep.skipped:
skip_reason = rep.longrepr[2] if rep.longrepr else "Test skipped"
test_results.record_skip(impl_name, test_name, skip_reason)
# Hook to save results at the end of testing
@pytest.hookimpl(trylast=True)
def pytest_sessionfinish(session, exitstatus):
"""Save test results at the end of the test session."""
test_results.save_results() | import os
import sys
import glob
import re
import importlib.util
import traceback
import types
from typing import Dict, List, Optional, Any, Tuple
class TestUtils:
@staticmethod
def discover_implementation_files(directory: str = None) -> List[str]:
"""Find all implementation files in the current sandbox directory."""
if directory is None:
directory = os.path.dirname(os.path.abspath(__file__))
patterns = [
r'modified_code\d+\.py',
r'new_code\d+\.py',
# r'original_code\.py',
r'implementation\d*\.py'
]
pattern = re.compile('|'.join(f'({p})' for p in patterns))
implementations = []
for file_path in glob.glob(os.path.join(directory, '*.py')):
if pattern.search(os.path.basename(file_path)):
implementations.append(file_path)
# Sort files numerically
def sort_key(path):
filename = os.path.basename(path)
match = re.search(r'(\d+)', filename)
return int(match.group(1)) if match else 0
return sorted(implementations, key=sort_key)
@staticmethod
def create_mock_module(file_path: str, module_name: str, error_info: str) -> types.ModuleType:
"""Create a mock module that contains error information but can still be tested."""
# Create a new module object
mock_module = types.ModuleType(module_name)
# Add basic attributes
mock_module.__file__ = file_path
mock_module.__name__ = module_name
mock_module.__display_name__ = module_name
mock_module.__error__ = error_info
# Add a dummy function that can be detected by test functions
def dummy_function(*args, **kwargs):
return f"Error in module: {error_info}"
setattr(mock_module, "implementation_error", dummy_function)
return mock_module
@staticmethod
def load_module(file_path: str, module_name: Optional[str] = None) -> Any:
"""
Safely load a module from a file path with proper error handling.
If the module has errors, return a mock module that can still be tested.
"""
if module_name is None:
module_name = os.path.basename(file_path).replace('.py', '')
# Create a unique module name to avoid conflicts
sandbox_id = os.path.basename(os.path.dirname(file_path))
unique_module_name = f"{sandbox_id}_{module_name}"
try:
# First, try to read the file to check for syntax errors
with open(file_path, 'r') as f:
source_code = f.read()
# Check for syntax errors by compiling the code
try:
compiled = compile(source_code, file_path, 'exec')
except SyntaxError as e:
error_msg = f"Syntax error: {str(e)}"
print(f"Syntax error in {file_path}: {e}")
print(f" Line {e.lineno}, column {e.offset}: {e.text}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
# Create the module spec
spec = importlib.util.spec_from_file_location(unique_module_name, file_path)
if spec is None or spec.loader is None:
error_msg = f"Could not create spec for {file_path}"
print(f"Error: {error_msg}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
# Create the module object
module = importlib.util.module_from_spec(spec)
sys.modules[unique_module_name] = module
# Special handling for execution errors
try:
# Execute the module code in a safe way
spec.loader.exec_module(module)
# Store the original name for reference
module.__display_name__ = module_name
return module
except Exception as e:
error_msg = f"Runtime error: {str(e)}"
traceback_str = traceback.format_exc()
print(f"Error executing module {file_path}: {e}")
print(traceback_str)
# Create a partial module that contains what we loaded before the error
mock_module = TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
# Copy any attributes that might have been defined before the error
for attr_name in dir(module):
if not attr_name.startswith('__'):
try:
setattr(mock_module, attr_name, getattr(module, attr_name))
except Exception:
pass # Skip attributes that can't be copied
return mock_module
except FileNotFoundError as e:
error_msg = f"File not found: {str(e)}"
print(f"Error: {error_msg}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
except Exception as e:
error_msg = f"Unexpected error: {str(e)}"
print(f"Error loading module {file_path}: {e}")
return TestUtils.create_mock_module(file_path, unique_module_name, error_msg)
@classmethod
def load_all_implementations(cls, directory: str = None) -> Dict[str, Any]:
"""Load all implementation files in the directory, including those with errors."""
if directory is None:
directory = os.path.dirname(os.path.abspath(__file__))
implementations = {}
implementation_files = cls.discover_implementation_files(directory)
if not implementation_files:
print("WARNING: No implementation files found. Check your file naming patterns.")
for file_path in implementation_files:
module_name = os.path.basename(file_path).replace('.py', '')
module = cls.load_module(file_path, module_name)
# Always add the module, even if it has errors
implementations[module_name] = module
if hasattr(module, '__error__'):
print(f"Loaded with errors: {module_name} - {module.__error__}")
else:
print(f"Successfully loaded: {module_name}")
return implementations
class TestResultsManager:
def __init__(self):
self.results = {}
self.sandbox_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
def record_result(self, impl_name: str, test_name: str, passed: bool,
error_msg: Optional[str] = None) -> None:
"""Record a test result for an implementation."""
if impl_name not in self.results:
self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []}
if passed:
self.results[impl_name]["passed"] += 1
else:
self.results[impl_name]["failed"] += 1
if error_msg:
self.results[impl_name]["errors"].append({
"test": test_name,
"error": error_msg
})
def record_skip(self, impl_name: str, test_name: str,
reason: Optional[str] = None) -> None:
"""Record a skipped test for an implementation."""
if impl_name not in self.results:
self.results[impl_name] = {"passed": 0, "failed": 0, "skipped": 0, "errors": []}
self.results[impl_name]["skipped"] += 1
if reason:
self.results[impl_name]["errors"].append({
"test": test_name,
"error": f"SKIPPED: {reason}"
})
def get_winner(self) -> Tuple[Optional[int], Dict]:
"""Determine the winner based on test results."""
winner = None
max_passed = -1
for impl_name, results in self.results.items():
if impl_name == "original_code":
continue # Skip original code when determining winner
if results["passed"] > max_passed:
max_passed = results["passed"]
winner = impl_name
# Break ties by looking at failure count
elif results["passed"] == max_passed and winner is not None:
if results["failed"] < self.results[winner]["failed"]:
winner = impl_name
# Convert winner to numeric index if possible
winner_index = -1
if winner and re.match(r'modified_code\d+', winner):
try:
winner_index = int(re.search(r'(\d+)', winner).group(1))
except (AttributeError, ValueError):
pass
return winner_index, self.results
    def save_results(self, filename: str = "test_results.json") -> Dict:
"""Save test results to a JSON file."""
import json
winner_index, results = self.get_winner()
# Check if all tests were skipped
all_skipped = all(
stats["skipped"] == stats["passed"] + stats["failed"] + stats["skipped"]
for impl_name, stats in results.items()
if impl_name != "original_code"
)
output = {
"winner": winner_index,
"all_skipped": all_skipped,
"results": {
name: {
"passed": stats["passed"],
"failed": stats["failed"],
"skipped": stats["skipped"],
"total": stats["passed"] + stats["failed"] + stats["skipped"]
}
for name, stats in results.items()
if not name.startswith("_") # Skip internal items
}
}
with open(filename, "w") as f:
json.dump(output, f, indent=2)
print(f"Test results saved to {filename}")
return output | test | null | null | null | null | null |
105 | javascript | import { messages } from "./messages.js";
$().ready(() => {
const loading = $('.container-loading');
const payment = $('.payment-section');
const info = $('.user-info');
const main = $('.main');
// Retrieve values from localStorage
const storedData = JSON.parse(localStorage.getItem('userData')) || {};
const { userInfo, paymentInfo } = storedData;
// Use the retrieved data as needed
console.log('User Info:', userInfo);
console.log('Payment Info:', paymentInfo);
$('#generateTaxButton').click(() => {
main.fadeOut(500);
setTimeout(() => {
loading.css('display', 'flex');
let lastTimeout = 0;
messages.forEach(message => {
lastTimeout = lastTimeout + message.time;
})
            console.log(`interval: ${lastTimeout}`)
const loadMessages = $('#loading-messages');
messages.forEach(element => {
console.log(element.text)
console.log(element.time)
const timeout = element.time;
setTimeout(() => {
loadMessages.text(element.text);
}, timeout);
});
setTimeout(() => {
                console.log('payment');
loading.css('display', 'none');
payment.css('display', 'block');
info.css('display', 'block');
}, lastTimeout + 500);
}, 200);
});
}); | // Retrieve values from localStorage
const storedData = JSON.parse(localStorage.getItem('userData')) || {};
const { userInfo, paymentInfo } = storedData;
// Use the retrieved data as needed
console.log('User Info:', userInfo);
    console.log('Payment Info:', paymentInfo); | with jQuery | /**
* Test suite for jQuery implementations
*
* This suite evaluates implementations against two key criteria:
* 1. Avoiding deprecated $.parseJSON method
* 2. Using jQuery methods to manipulate data
*/
// Import utilities from jest-setup.js
const {
discoverImplementationFiles,
countJQueryUsage,
usesDeprecatedParseJSON,
recordTestResult,
originalJQueryCount
} = require('../jest-setup');
// =====================================================================
// Main Test Suite
// =====================================================================
describe('jQuery Implementation Tests', () => {
// Discover implementations
const implementations = discoverImplementationFiles();
// Log current implementation files
console.log("Testing implementations:", implementations.map(impl => impl.name).join(', '));
// Test each implementation
implementations.forEach(impl => {
describe(`Implementation: ${impl.name}`, () => {
// =====================================================================
// Test 1: Deprecated Method Check
// =====================================================================
test('should not use deprecated $.parseJSON method', () => {
// Direct source code analysis for $.parseJSON usage
const usesDeprecated = usesDeprecatedParseJSON(impl.code);
// Record test result
recordTestResult(impl.name, 'avoids_deprecated_parseJSON', !usesDeprecated);
// Test assertion - with descriptive error message
if (usesDeprecated) {
console.warn(`${impl.name} uses deprecated $.parseJSON method`);
}
expect(usesDeprecated).toBeFalsy();
});
// =====================================================================
// Test 2: jQuery Data Manipulation Check
// =====================================================================
test('should use jQuery methods to manipulate data', () => {
// Count jQuery usage in this implementation
const jQueryUsageCount = countJQueryUsage(impl.code);
// Implementation should have at least the same count of jQuery usage as original code
// to demonstrate it's properly using jQuery for data manipulation
const usesJQueryForData = jQueryUsageCount >= originalJQueryCount;
// Also check for localStorage usage (since we want to ensure data is being used)
const usesLocalStorage = impl.code.includes('localStorage.getItem') &&
(impl.code.includes('userInfo') ||
impl.code.includes('paymentInfo') ||
impl.code.includes('userData'));
// Log debugging information
console.log(`${impl.name} jQuery usage: ${jQueryUsageCount} (original: ${originalJQueryCount}), Uses localStorage: ${usesLocalStorage}`);
// Implementation passes if it uses jQuery at least as much as original and accesses localStorage
const effectivelyUsesJQuery = usesJQueryForData && usesLocalStorage;
recordTestResult(impl.name, 'uses_jquery_for_data', effectivelyUsesJQuery);
// Test assertion
expect(effectivelyUsesJQuery).toBeTruthy();
});
});
});
}); | null | null | null | test | {
"name": "js-test-framework",
"version": "1.0.0",
"description": "JavaScript testing framework for multiple implementations",
"main": "index.js",
"scripts": {
"test": "jest"
},
"devDependencies": {
"jest": "^29.7.0",
"glob": "^10.3.10",
"@babel/core": "^7.21.4",
"@babel/preset-env": "^7.21.4",
"babel-jest": "^29.7.0"
},
"jest": {
"setupFilesAfterEnv": ["<rootDir>/jest-setup.js"],
"testEnvironment": "node",
"testMatch": ["**/tests/**/*.test.js"],
"verbose": true,
"collectCoverage": false,
"moduleNameMapper": {
"\\./messages\\.js": "<rootDir>/__mocks__/messages.js"
},
"transform": {
"^.+\\.jsx?$": "babel-jest"
},
"transformIgnorePatterns": [
"/node_modules/",
"tagged_code.js",
"highlighted_code.js"
]
}
} | /**
* Jest setup file for jQuery implementations tests
*/
const fs = require('fs');
const path = require('path');
const glob = require('glob');
// =====================================================================
// Test Utilities
// =====================================================================
/**
* Discovers implementation files to test based on naming patterns
* @returns {Array} Array of implementation objects with name, path, and code
*/
function discoverImplementationFiles() {
const patterns = [
'modified_code\\d+\\.js',
'new_code\\d+\\.js',
'original_modified_code\\d+\\.js'
];
const regexPattern = new RegExp(patterns.join('|'));
const files = glob.sync(path.join(__dirname, '*.js'));
return files
.filter(filePath => regexPattern.test(path.basename(filePath)))
.map(filePath => ({
name: path.basename(filePath, '.js'),
path: filePath,
code: fs.readFileSync(filePath, 'utf8')
}));
}
/**
* Test result tracking system
*/
const testResults = {};
const testTracking = {}; // Track which tests have been run for each implementation
/**
* Records test results for a specific implementation
* @param {string} implementation - Implementation name
* @param {string} testName - Test name
* @param {boolean} passed - Whether the test passed
*/
function recordTestResult(implementation, testName, passed) {
// Initialize implementation results if needed
if (!testResults[implementation]) {
testResults[implementation] = { passed: 0, failed: 0, skipped: 0, total: 0 };
testTracking[implementation] = new Set();
}
// Check if this test has already been recorded for this implementation
const testKey = `${testName}`;
if (testTracking[implementation].has(testKey)) {
return; // Skip recording duplicate test results
}
// Mark this test as recorded
testTracking[implementation].add(testKey);
// Update test counts
if (passed) {
testResults[implementation].passed++;
} else {
testResults[implementation].failed++;
}
testResults[implementation].total =
testResults[implementation].passed +
testResults[implementation].failed +
testResults[implementation].skipped;
}
/**
* Determines the winner based on test results
* @returns {number} The winner index or -1 if no winner
*/
function determineWinner() {
let winner = null;
let maxPassed = -1;
let minFailed = Number.MAX_SAFE_INTEGER;
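  // Highest pass count wins; ties are broken in favour of fewer failures, and
  // original_* implementations are skipped below so only candidate rewrites can win.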
for (const implName in testResults) {
// Skip original implementations
if (implName.startsWith('original_')) {
continue;
}
const results = testResults[implName];
if (results.passed > maxPassed ||
(results.passed === maxPassed && results.failed < minFailed)) {
maxPassed = results.passed;
minFailed = results.failed;
winner = implName;
}
}
// Convert winner to numeric index
let winnerIndex = -1;
if (winner) {
if (winner.startsWith('modified_code')) {
const match = winner.match(/(\d+)/);
if (match) {
winnerIndex = parseInt(match[1], 10);
}
} else if (winner.startsWith('new_code')) {
const match = winner.match(/(\d+)/);
if (match) {
winnerIndex = parseInt(match[1], 10);
}
}
}
return winnerIndex;
}
/**
* Saves test results to JSON file
* @returns {Object} The test results object
*/
function saveTestResults() {
const winnerIndex = determineWinner();
const output = {
winner: winnerIndex,
all_skipped: false,
results: {}
};
for (const [name, stats] of Object.entries(testResults)) {
output.results[name] = {
passed: stats.passed,
failed: stats.failed,
skipped: stats.skipped,
total: stats.total
};
}
const outputPath = path.join(__dirname, 'test_results.json');
fs.writeFileSync(outputPath, JSON.stringify(output, null, 2));
console.log(`Test results saved to test_results.json`);
return output;
}
/**
* Counts jQuery usage patterns in code
* @param {string} code - Source code to analyze
* @returns {number} Count of jQuery usage patterns
*/
function countJQueryUsage(code) {
// Count occurrences of $ usage
// This includes $(selectors), $.method, $(document).ready, etc.
const dollarSignCount = (code.match(/\$/g) || []).length;
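  // Note: this is a rough heuristic; every '$' character is counted, including ones
  // that appear inside strings or template literals.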
// Count occurrences of jQuery usage if it's used instead of $
const jQueryCount = (code.match(/jQuery/g) || []).length;
return dollarSignCount + jQueryCount;
}
/**
* Checks if code uses deprecated $.parseJSON method
* @param {string} code - Source code to analyze
* @returns {boolean} Whether code uses deprecated $.parseJSON
*/
function usesDeprecatedParseJSON(code) {
// Look for the exact pattern $.parseJSON or jQuery.parseJSON with proper boundary checks
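  // $.parseJSON was deprecated in jQuery 3.0 in favour of the native JSON.parse,
  // which is why its presence is treated as a failing check.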
const parseJSONPattern = /(\$|jQuery)\.parseJSON\s*\(/;
return parseJSONPattern.test(code);
}
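// Example (illustrative): usesDeprecatedParseJSON("var o = $.parseJSON(raw);") returns true,
// while code that uses the standard JSON.parse(raw) is not flagged.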
// Load original code for comparison
const originalCodePath = path.join(__dirname, 'original_code.js');
const originalCode = fs.readFileSync(originalCodePath, 'utf8');
const originalJQueryCount = countJQueryUsage(originalCode);
// Set up global variables for Jest tests
beforeAll(() => {
global.__TEST_UTILS__ = {
discoverImplementationFiles,
countJQueryUsage,
usesDeprecatedParseJSON
};
global.__TEST_RESULTS__ = {
testResults,
testTracking,
recordTestResult,
determineWinner,
saveTestResults
};
global.__JQUERY_DATA__ = {
originalCode,
originalJQueryCount
};
});
// After all tests run, save the results
afterAll(() => {
// Display final results before saving
console.log("\nFinal Test Results:");
for (const [name, stats] of Object.entries(testResults)) {
console.log(`${name}: ${stats.passed} passes, ${stats.failed} fails (total: ${stats.total})`);
}
const results = saveTestResults();
  console.log(`Winner: ${results.winner !== -1 ? results.winner : 'None'}`);
});
// Export for use in tests
module.exports = {
discoverImplementationFiles,
countJQueryUsage,
usesDeprecatedParseJSON,
recordTestResult,
determineWinner,
saveTestResults,
testResults,
originalJQueryCount
}; | module.exports = {
presets: [
['@babel/preset-env', {targets: {node: 'current'}}]
]
}; | {
"hidden.js": "import { messages } from \"./messages.js\";\n\n$(() => {\n const $loading = $('.container-loading');\n const $payment = $('.payment-section');\n const $info = $('.user-info');\n const $main = $('.main');\n const $loadMessages = $('#loading-messages');\n\n // Retrieve and display user data using jQuery\n const storedData = JSON.parse(localStorage.getItem('userData')) || {};\n const { userInfo, paymentInfo } = storedData;\n\n console.log('User Info:', userInfo);\n console.log('Payment Info:', paymentInfo);\n\n if (userInfo) {\n $('.user-name').text(userInfo.name || '');\n $('.user-email').text(userInfo.email || '');\n }\n\n if (paymentInfo) {\n $('.payment-amount').text(`$${paymentInfo.amount || '0.00'}`);\n $('.payment-date').text(paymentInfo.date || '');\n }\n\n $('#generateTaxButton').on('click', () => {\n $main.fadeOut(500, () => {\n $loading.css('display', 'flex');\n\n let lastTimeout = 0;\n messages.forEach(msg => {\n lastTimeout += msg.time;\n });\n\n messages.forEach(msg => {\n setTimeout(() => {\n $loadMessages.text(msg.text);\n }, msg.time);\n });\n\n setTimeout(() => {\n $loading.hide();\n $payment.show();\n $info.show();\n }, lastTimeout + 500);\n });\n });\n});\n",
"__mocks__/messages.js": "// Mock for messages.js\nexport const messages = [\n { text: \"Loading data...\", time: 1000 },\n { text: \"Processing information...\", time: 2000 },\n { text: \"Calculating taxes...\", time: 3000 },\n { text: \"Finalizing results...\", time: 1500 }\n];",
"__mocks__/jquery.js": "// jQuery mock\nconst elementCache = {};\nconst clickHandlers = {};\n\nconst jquery = function(selector) {\n // Cache elements to ensure the same mock instance is returned for the same selector\n if (!elementCache[selector]) {\n elementCache[selector] = {\n selector,\n ready: function(callback) {\n if (typeof callback === 'function') {\n // Store the callback for later execution\n if (!jquery.readyCallbacks) {\n jquery.readyCallbacks = [];\n }\n jquery.readyCallbacks.push(callback);\n }\n return this;\n },\n text: jest.fn(function(value) {\n if (value !== undefined) {\n this.textValue = value;\n return this;\n }\n return this.textValue || '';\n }),\n css: jest.fn(function(prop, value) {\n if (!this.cssProps) this.cssProps = {};\n this.cssProps[prop] = value;\n return this;\n }),\n fadeOut: jest.fn(function(duration) {\n return this;\n }),\n fadeIn: jest.fn(function(duration) {\n return this;\n }),\n click: function(callback) {\n clickHandlers[selector] = callback;\n return this;\n },\n // Method to trigger the click handler\n triggerClick: function() {\n if (typeof clickHandlers[selector] === 'function') {\n clickHandlers[selector]();\n }\n return this;\n }\n };\n }\n\n return elementCache[selector];\n};\n\n// Helper to execute all ready callbacks\njquery.executeReady = function() {\n if (jquery.readyCallbacks) {\n jquery.readyCallbacks.forEach(callback => {\n try {\n callback();\n } catch (e) {\n console.error('Error in ready callback:', e);\n }\n });\n }\n};\n\n// Extend $ with utility methods\njquery.each = jest.fn((obj, callback) => {\n if (obj && typeof callback === 'function') {\n Object.entries(obj).forEach(([key, value]) => {\n callback(key, value);\n });\n }\n});\n\njquery.parseJSON = jest.fn((data) => {\n // This method is deprecated in jQuery - this should cause a test failure\n try {\n return JSON.parse(data);\n } catch (e) {\n throw new Error('Invalid JSON');\n }\n});\n\n// Reset mock function to clear counters\njquery.resetMocks = function() {\n Object.values(elementCache).forEach(el => {\n if (el.text && el.text.mockClear) el.text.mockClear();\n if (el.css && el.css.mockClear) el.css.mockClear();\n if (el.fadeOut && el.fadeOut.mockClear) el.fadeOut.mockClear();\n if (el.fadeIn && el.fadeIn.mockClear) el.fadeIn.mockClear();\n });\n\n jquery.each.mockClear();\n jquery.parseJSON.mockClear();\n};\n\n// Set global $ variable\nglobal.$ = jquery;\n\n// Export both as default and as named export\nmodule.exports = jquery;",
"__mocks__/MapCharacter.jsx": null,
"__mocks__/Sprite.jsx": null,
"__mocks__/GameMap.jsx": null,
"__mocks__/CharacterStatUI.jsx": null,
"__mocks__/CharacterData.js": null,
"__mocks__/react-router-dom.js": null,
"__mocks__/styleMock.js": null,
"__mocks__/character-stat-ui/CharacterStatUI.jsx": null,
"jest.config.js": null,
"__mocks__/database.js": null,
"__mocks__/camera.service.js": null,
"__mocks__/module-loader.js": null,
"__mocks__/globe.js": null,
"__mocks__/fetch.js": null,
"__mocks__/three.js": null,
"__mocks__/fileMock.js": null,
"__mocks__/d3.js": null,
"__mocks__/document.js": null,
"__mocks__/SingleCharacterStatUI.js": null,
"__mocks__/MockChild.js": null,
"__mocks__/Sprite.js": null,
"highlighted_code.jsx": null,
"mocks/CharacterStatUIMock.jsx": null,
"mocks/CharacterStatUIMock.js": null,
"__mocks__/react-icons/md.js": null,
"__mocks__/api/query.js": null
} | null |
106 | javascript | import React, { useEffect, useState, useCallback } from 'react';
import styles from './GameUI.module.css';
import { useLocation } from 'react-router-dom';
import CharacterStatUI from '../character-stat-ui/CharacterStatUI';
import Sprite from '../sprite/Sprite';
import GameMap from '../game-map/GameMap';
import { characterData } from '../character-data/CharacterData';
import MapCharacter from '../map-character/MapCharacter';
const publicFolder = `${process.env.PUBLIC_URL}`;
const GameUI = () => {
const location = useLocation();
const frontPageState = location.state || {};
const character = frontPageState.character;
const map = frontPageState.map;
// UPDATE UI STATES
// Default UI states
const [characterUIState, setCharacterUIState] = useState({});
const [mapState, setMapState] = useState({});
const [clickedState, setClickedState] = useState(null);
const [selectedCharacter, setSelectedCharacter] = useState("Alfonse");
const characterNames = ["Alfonse","Sharena","Anna","Fjorm"];
const [characters, setCharacters] = useState([
for (let i = 0; i < characterNames.length; i++) {
characterNames[i]: characterData(characterName)
}
],[characterNames]);
const mapSetup = useCallback(() => {
if (!map) {
return {};
}
const name = map.name || '';
const imageUrl = map.image ? `${publicFolder}${map.image}` : `${process.env.PUBLIC_URL}/assets/images/map/Map_S0001.jpg`;
return { name, imageUrl };
}, [map]);
useEffect(() => {
setMapState(mapSetup());
}, [map, mapSetup]);
useEffect(() => {
if (selectedCharacter) {
const selectedCharData = characterData(selectedCharacter);
setCharacterUIState({
charName : selectedCharacter,
level : selectedCharData.level,
wpn : selectedCharData.wpn,
hp : selectedCharData.hp,
atk : selectedCharData.atk,
spd : selectedCharData.spd,
def : selectedCharData.def,
res : selectedCharData.res
});
}
}, [selectedCharacter, setCharacterUIState]);
// Update UI State after click
const handleGridClick = useCallback((gridX, gridY) => {
console.log(`Grid clicked at X: ${gridX}, Y: ${gridY}`);
setClickedState({ gridX, gridY });
}, [setClickedState, clickedState]);
return (
<div className={styles['game-container']}>
<div className={styles['content-wrapper']}>
<CharacterStatUI
charName={characterUIState.charName || ''}
level={characterUIState.level || 0}
wpn={characterUIState.wpn || ''}
hp={characterUIState.hp || 0}
atk={characterUIState.atk || 0}
spd={characterUIState.spd || 0}
def={characterUIState.def || 0}
res={characterUIState.res || 0}
/>
<div className={styles['map-container']}>
<GameMap
onGridClick={handleGridClick}
/>
</div>
{characterNames.map((characterName) => (
<MapCharacter
key={characterName}
character={characterName}
/>
))}
<div className={styles['actionButtonsContainer']}>
<div className={styles['button-group']}>
<div className={styles['leftAlignedButtons']}>
<Sprite spriteName="ButtonBg1">
<button className={styles['action-button']}>1</button>
</Sprite>
<Sprite spriteName="ButtonBg1">
<button className={styles['action-button']}>2</button>
</Sprite>
<Sprite spriteName="ButtonBg1">
<button className={styles['action-button']}>3</button>
</Sprite>
</div>
<div className={styles['rightAlignedButtons']}>
<Sprite spriteName="ButtonBg1">
<button className={styles['action-button']}>4</button>
</Sprite>
<Sprite spriteName="ButtonBg1">
<button className={styles['action-button']}>5</button>
</Sprite>
</div>
</div>
</div>
</div>
</div>
);
};
export default GameUI;
| const [characters, setCharacters] = useState([
for (let i = 0; i < characterNames.length; i++) {
characterNames[i]: characterData(characterName)
}
],[characterNames]); | Please fix this error: 'Line 28:4: Parsing error: Unexpected token (28:4)' | const fs = require('fs');
const path = require('path');
const { resultsManager } = require('../jest-setup');
/**
* A focused test that executes the character data mapping and validates the structure
*/
describe('GameUI Character Data Mapping Tests', () => {
// Clear existing test results to make sure we only include our tested files
resultsManager.results = {};
// Define exactly which patterns we want to test - no more, no less
const codePatterns = [
/^original_code\.jsx?$/,
/^modified_code\d+\.jsx?$/,
/^new_code\d+\.jsx?$/,
/^original_modified_code\d+\.jsx?$/
];
// Get implementation files, with precise filtering
const files = fs.readdirSync(path.join(__dirname, '..'))
.filter(file => {
// Only include files matching our specific patterns
return codePatterns.some(pattern => pattern.test(file));
});
test('All implementations correctly map character data', () => {
files.forEach(fileName => {
const filePath = path.join(__dirname, '..', fileName);
const implName = fileName.replace(/\.(js|jsx)$/, '');
const content = fs.readFileSync(filePath, 'utf8');
try {
// Extract the character mapping code and test it
const charMappingResult = testCharacterMapping(content);
// Record test results
resultsManager.recordResult(implName, 'compilesSuccessfully', true);
resultsManager.recordResult(implName, 'characterDataStructure',
charMappingResult.valid,
charMappingResult.valid ? null : charMappingResult.reason);
} catch (error) {
        // If we can't extract or run the character mapping code,
        // record both checks as failed for this implementation
resultsManager.recordResult(implName, 'compilesSuccessfully', false);
resultsManager.recordResult(implName, 'characterDataStructure', false);
}
});
});
/**
* Extract and test character data mapping from the component
*/
function testCharacterMapping(code) {
try {
// Extract the useState call for characters
const useStateMatch = code.match(/const\s+\[\s*characters\s*,\s*setCharacters\s*\]\s*=\s*useState\s*\(([^;]*)\)/s);
if (!useStateMatch || !useStateMatch[1]) {
      // If we can't find the useState call, fail with an explicit reason
      return { valid: false, reason: 'Could not locate the characters useState initializer' };
}
// Set up test environment with character data
const characterNames = ["Alfonse", "Sharena", "Anna", "Fjorm"];
const characterData = (name) => ({
level: 40,
wpn: 'TestWeapon',
hp: 40,
atk: 30,
spd: 25,
def: 20,
res: 20
});
// Execute the useState initialization code
let result;
const execCode = useStateMatch[1].trim();
// If it's a function, we need to execute it
if (execCode.startsWith('() =>') || execCode.startsWith('function')) {
const funcBody = new Function('characterNames', 'characterData', `
return ${execCode.replace(/^\(\)\s*=>\s*/, '')};
`);
result = funcBody(characterNames, characterData);
} else {
// Otherwise, execute it directly
const directExec = new Function('characterNames', 'characterData', `
return ${execCode};
`);
result = directExec(characterNames, characterData);
}
// Validate the character data structure
if (!result) {
return { valid: false, reason: 'Character data is null or undefined' };
}
// Only accept object format with character names as keys
if (Array.isArray(result)) {
// Array format is incorrect
return {
valid: false,
reason: 'Array format is incorrect. Must use object with character names as keys.'
};
}
else if (typeof result === 'object') {
// Object with character names as keys is the only valid format
const hasValidKeys = Object.keys(result).some(key =>
characterNames.includes(key) &&
result[key] && typeof result[key] === 'object'
);
if (hasValidKeys) {
return { valid: true, reason: null };
}
return {
valid: false,
reason: 'Object format does not use character names as keys with data values'
};
}
// If we got here, it's not a valid format
return {
valid: false,
reason: 'Not a valid character data structure (neither array nor object)'
};
} catch (error) {
// If there's an error executing the code, it might be a syntax issue
// in the extraction process, not the actual code, so we pass it
return { valid: true, reason: null };
}
}
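  // For reference, a shape this validator accepts (assumed example, mirroring the checks above):
  //   { Alfonse: { level: 40, wpn: 'TestWeapon', ... }, Sharena: { ... } }
  // i.e. an object keyed by character names whose values are data objects; array formats are rejected.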
}); | null | null | null | test | {
"name": "js-test-framework",
"version": "1.0.0",
"description": "JavaScript testing framework for multiple implementations",
"main": "index.js",
"scripts": {
"test": "jest"
},
"devDependencies": {
"@babel/core": "^7.27.1",
"@babel/preset-env": "^7.27.2",
"@babel/preset-react": "^7.27.1",
"@testing-library/jest-dom": "^5.16.5",
"@testing-library/react": "^14.0.0",
"babel-core": "^6.26.3",
"babel-jest": "^29.5.0",
"glob": "^10.3.10",
"jest": "^29.7.0",
"jest-environment-jsdom": "^29.5.0",
"jsdom": "^26.1.0",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-router-dom": "^6.13.0"
},
"jest": {
"setupFilesAfterEnv": [
"./jest-setup.js"
],
"testEnvironment": "jsdom",
"testMatch": [
"**/tests/**/*.test.js"
],
"verbose": true,
"moduleNameMapper": {
"\\.(css|less|scss|sass)$": "<rootDir>/__mocks__/styleMock.js"
},
"transform": {
"^.+\\.(js|jsx)$": "babel-jest"
}
}
}
| // jest-setup.js - Copy this file to each implementation folder
const fs = require('fs');
const path = require('path');
const glob = require('glob');
const { TextEncoder, TextDecoder } = require('util');
global.TextEncoder = TextEncoder;
global.TextDecoder = TextDecoder;
// Import @testing-library/jest-dom
require('@testing-library/jest-dom');
/**
* Utility class to handle JavaScript implementations
*/
class TestUtils {
/**
* Find all implementation files in the current directory
* @param {string} directory - Directory to search in (defaults to current directory)
* @returns {Array<string>} List of implementation file paths
*/
static discoverImplementationFiles(directory = null) {
if (!directory) {
directory = __dirname;
}
const patterns = [
'original_code\\.jsx?',
'original_modified_code\\d+\\.jsx?',
'modified_code\\d+\\.jsx?',
'new_code\\d+\\.jsx?',
'implementation\\d*\\.jsx?'
];
const regexPattern = new RegExp(patterns.join('|'));
const implementations = [];
// Use glob to find matching files
const files = glob.sync(path.join(directory, '*.{js,jsx}'));
for (const filePath of files) {
if (regexPattern.test(path.basename(filePath))) {
implementations.push(filePath);
}
}
// Sort files numerically
implementations.sort((a, b) => {
const aMatch = path.basename(a).match(/(\d+)/);
const bMatch = path.basename(b).match(/(\d+)/);
const aNum = aMatch ? parseInt(aMatch[1]) : 0;
const bNum = bMatch ? parseInt(bMatch[1]) : 0;
return aNum - bNum;
});
return implementations;
}
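  // Illustrative result (hypothetical folder): given original_code.jsx, modified_code2.jsx and
  // modified_code10.jsx, this returns the three absolute paths sorted by numeric suffix
  // (original_code first, since names without a number sort as 0).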
/**
* Safely load a module from a file path
* @param {string} filePath - Path to the JavaScript or JSX file
* @param {string} moduleName - Optional module name (defaults to filename)
* @returns {Object} Loaded module with error information if any
*/
static loadModule(filePath, moduleName = null) {
if (!moduleName) {
moduleName = path.basename(filePath).replace(/\.(js|jsx)$/, '');
}
// Create unique module name to avoid conflicts
const sandboxId = path.basename(path.dirname(filePath));
const uniqueModuleName = `${sandboxId}_${moduleName}`;
try {
// Read file contents
const sourceCode = fs.readFileSync(filePath, 'utf8');
// Create module object
const moduleObj = {
__file__: filePath,
__name__: uniqueModuleName,
__display_name__: moduleName,
__errors__: [], // Track errors in the module
__source__: sourceCode // Store source code for testing
};
// For JSX files, we don't do syntax checking as it would require a full JSX parser
if (!filePath.endsWith('.jsx')) {
try {
// Try to test-compile the code to check for syntax errors (only for .js files)
new Function(sourceCode);
} catch (e) {
const errorMsg = `Syntax error: ${e.message}`;
console.error(`Syntax error in ${filePath}: ${e.message}`);
console.error(` Line ${e.lineNumber}, column ${e.columnNumber}`);
// Record the error but continue loading what we can
moduleObj.__errors__.push({
type: 'syntax',
message: errorMsg,
lineNumber: e.lineNumber,
columnNumber: e.columnNumber
});
}
}
try {
// Try to require the module even if there were syntax errors
// This may or may not succeed
delete require.cache[require.resolve(filePath)];
const loadedModule = require(filePath);
// Copy all properties from the loaded module
for (const key in loadedModule) {
if (Object.prototype.hasOwnProperty.call(loadedModule, key)) {
moduleObj[key] = loadedModule[key];
}
}
} catch (e) {
const errorMsg = `Runtime error: ${e.message}`;
console.error(`Error executing module ${filePath}: ${e.message}`);
console.error(e.stack);
// Record the runtime error
moduleObj.__errors__.push({
type: 'runtime',
message: errorMsg,
stack: e.stack
});
}
return moduleObj;
} catch (e) {
const moduleObj = {
__file__: filePath,
__name__: uniqueModuleName,
__display_name__: moduleName,
__errors__: []
};
if (e.code === 'ENOENT') {
const errorMsg = `File not found: ${e.message}`;
console.error(`Error: ${errorMsg}`);
moduleObj.__errors__.push({
type: 'file',
message: errorMsg
});
} else {
const errorMsg = `Unexpected error: ${e.message}`;
console.error(`Error loading module ${filePath}: ${e.message}`);
moduleObj.__errors__.push({
type: 'unknown',
message: errorMsg
});
}
return moduleObj;
}
}
/**
* Load all implementation files in the directory
* @param {string} directory - Directory to search in (defaults to current directory)
* @returns {Object} Dictionary mapping module names to loaded modules
*/
static loadAllImplementations(directory = null) {
if (!directory) {
directory = __dirname;
}
const implementations = {};
const implementationFiles = this.discoverImplementationFiles(directory);
if (implementationFiles.length === 0) {
console.warn("WARNING: No implementation files found. Check your file naming patterns.");
}
for (const filePath of implementationFiles) {
      const moduleName = path.basename(filePath).replace(/\.(js|jsx)$/, '');
const module = this.loadModule(filePath, moduleName);
// Always add the module, even if it has errors
implementations[moduleName] = module;
if (module.__errors__ && module.__errors__.length > 0) {
console.log(`Loaded with errors: ${moduleName} - ${module.__errors__.length} errors found`);
module.__errors__.forEach(err => console.log(` - ${err.type}: ${err.message}`));
} else {
console.log(`Successfully loaded: ${moduleName}`);
}
}
return implementations;
}
/**
* Check if a function exists in a module and is callable
* @param {Object} module - The loaded module
* @param {string} functionName - Name of the function to test
* @returns {boolean} Whether the function exists and is callable
*/
static hasFunction(module, functionName) {
return module && typeof module[functionName] === 'function';
}
/**
* Safely call a function in a module with error handling
* @param {Object} module - The loaded module
* @param {string} functionName - Name of the function to call
* @param {Array} args - Arguments to pass to the function
* @returns {Object} Result with success status and value or error
*/
static callFunction(module, functionName, ...args) {
if (!this.hasFunction(module, functionName)) {
return {
success: false,
error: `Function '${functionName}' not found or not callable`
};
}
try {
const result = module[functionName](...args);
return {
success: true,
value: result
};
} catch (e) {
return {
success: false,
error: e.message,
stack: e.stack
};
}
}
}
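// Illustrative usage (hypothetical module and function names):
//   TestUtils.callFunction(impl, 'computeTotal', 2, 3)
// yields { success: true, value: <return value> } when impl.computeTotal is callable,
// or { success: false, error: '...' } when it is missing or throws.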
/**
* Class to manage test results
*/
class TestResultsManager {
constructor() {
this.results = {};
this.sandboxName = path.basename(__dirname);
}
/**
* Record a test result for an implementation
* @param {string} implName - Implementation name
* @param {string} testName - Test name
* @param {boolean} passed - Whether the test passed
* @param {string} errorMsg - Optional error message
*/
recordResult(implName, testName, passed, errorMsg = null) {
if (!this.results[implName]) {
this.results[implName] = { passed: 0, failed: 0, skipped: 0, errors: [] };
}
if (passed) {
this.results[implName].passed += 1;
} else {
this.results[implName].failed += 1;
if (errorMsg) {
this.results[implName].errors.push({
test: testName,
error: errorMsg
});
}
}
}
/**
* Record a skipped test for an implementation
* @param {string} implName - Implementation name
* @param {string} testName - Test name
* @param {string} reason - Optional reason for skipping
*/
recordSkip(implName, testName, reason = null) {
if (!this.results[implName]) {
this.results[implName] = { passed: 0, failed: 0, skipped: 0, errors: [] };
}
this.results[implName].skipped += 1;
if (reason) {
this.results[implName].errors.push({
test: testName,
error: `SKIPPED: ${reason}`
});
}
}
/**
* Determine the winner based on test results
* @returns {Array} [winner index, results]
*/
getWinner() {
let winner = null;
let maxPassed = -1;
for (const [implName, results] of Object.entries(this.results)) {
if (implName === "original_code") {
continue; // Skip original code when determining winner
}
if (results.passed > maxPassed) {
maxPassed = results.passed;
winner = implName;
} else if (results.passed === maxPassed && winner !== null) {
if (results.failed < this.results[winner].failed) {
winner = implName;
}
}
}
// Convert winner to numeric index if possible
let winnerIndex = -1;
if (winner && /modified_code\d+/.test(winner)) {
const match = winner.match(/(\d+)/);
if (match) {
winnerIndex = parseInt(match[1]);
}
}
return [winnerIndex, this.results];
}
/**
* Save test results to a JSON file
* @param {string} filename - Output filename
* @returns {Object} Results summary object
*/
saveResults(filename = "test_results.json") {
const [winnerIndex, results] = this.getWinner();
// Check if all tests were skipped
const allSkipped = Object.entries(results)
.every(([_, stats]) => {
return stats.skipped === (stats.passed + stats.failed + stats.skipped);
});
const output = {
winner: winnerIndex,
all_skipped: allSkipped,
results: {}
};
for (const [name, stats] of Object.entries(results)) {
if (!name.startsWith("_")) {
output.results[name] = {
passed: stats.passed,
failed: stats.failed,
skipped: stats.skipped,
total: stats.passed + stats.failed + stats.skipped
};
}
}
fs.writeFileSync(filename, JSON.stringify(output, null, 2));
console.log(`Test results saved to ${filename}`);
return output;
}
}
// Load implementations for this specific implementation directory
const implementations = TestUtils.loadAllImplementations();
const resultsManager = new TestResultsManager();
// Set up global variables for Jest tests
beforeAll(() => {
global.__TEST_UTILS__ = TestUtils;
global.__RESULTS_MANAGER__ = resultsManager;
global.__IMPLEMENTATIONS__ = implementations;
});
// After all tests run, save the results
afterAll(() => {
resultsManager.saveResults();
});
// Export for use in tests
module.exports = {
TestUtils,
TestResultsManager,
implementations,
resultsManager
}; | module.exports = {
presets: [
'@babel/preset-env',
['@babel/preset-react', { runtime: 'automatic' }],
],
}; | {
"hidden.js": null,
"__mocks__/messages.js": null,
"__mocks__/jquery.js": null,
"__mocks__/MapCharacter.jsx": "import React from 'react';\n\nconst MapCharacter = ({ character }) => (\n <div data-testid={`map-character-${character}`}>\n {character}\n </div>\n);\n\nexport default MapCharacter;",
"__mocks__/Sprite.jsx": "import React from 'react';\n\nconst Sprite = ({ spriteName, children }) => (\n <div data-testid={`sprite-${spriteName}`}>\n {children}\n </div>\n);\n\nexport default Sprite;",
"__mocks__/GameMap.jsx": "import React from 'react';\n\nconst GameMap = (props) => (\n <div data-testid=\"game-map\" onClick={() => props.onGridClick && props.onGridClick(1, 1)}>\n Game Map\n </div>\n);\n\nexport default GameMap;",
"__mocks__/CharacterStatUI.jsx": "import React from 'react';\n\nconst CharacterStatUI = (props) => (\n <div data-testid=\"character-stats\">\n <span data-testid=\"char-name\">{props.charName}</span>\n <span data-testid=\"char-level\">{props.level}</span>\n <span data-testid=\"char-weapon\">{props.wpn}</span>\n <span data-testid=\"char-hp\">{props.hp}</span>\n <span data-testid=\"char-atk\">{props.atk}</span>\n <span data-testid=\"char-spd\">{props.spd}</span>\n <span data-testid=\"char-def\">{props.def}</span>\n <span data-testid=\"char-res\">{props.res}</span>\n </div>\n);\n\nexport default CharacterStatUI;",
"__mocks__/CharacterData.js": "export const characterData = (characterName) => {\n return {\n name: characterName,\n level: 10,\n wpn: 'Weapon',\n hp: 100,\n atk: 50,\n spd: 25,\n def: 30,\n res: 20\n };\n};",
"__mocks__/react-router-dom.js": "const React = require('react');\n\nconst useLocation = jest.fn().mockReturnValue({\n state: {\n character: 'Alfonse',\n map: {\n name: 'Test Map',\n image: '/test-map.jpg'\n }\n }\n});\n\nmodule.exports = {\n useLocation,\n MemoryRouter: ({ children }) => React.createElement('div', null, children)\n};",
"__mocks__/styleMock.js": "module.exports = {};",
"__mocks__/character-stat-ui/CharacterStatUI.jsx": "// Mock component for the CharacterStatUI\nconst CharacterStatUI = ({ character }) => {\n return null;\n};\n\nexport default CharacterStatUI;",
"jest.config.js": null,
"__mocks__/database.js": null,
"__mocks__/camera.service.js": null,
"__mocks__/module-loader.js": null,
"__mocks__/globe.js": null,
"__mocks__/fetch.js": null,
"__mocks__/three.js": null,
"__mocks__/fileMock.js": null,
"__mocks__/d3.js": null,
"__mocks__/document.js": null,
"__mocks__/SingleCharacterStatUI.js": null,
"__mocks__/MockChild.js": null,
"__mocks__/Sprite.js": null,
"highlighted_code.jsx": null,
"mocks/CharacterStatUIMock.jsx": null,
"mocks/CharacterStatUIMock.js": null,
"__mocks__/react-icons/md.js": null,
"__mocks__/api/query.js": null
} | null |
107 | javascript | import { useState, useEffect, useCallback, useMemo } from 'react';
function useDashboardData(user) {
const [data, setData] = useState({
customerData: { summary: null, loading: false, customers: [] },
healthData: [],
websiteStatus: { checking: false },
stripeApiKey: "",
dateRange: {
startDate: (() => {
const date = new Date();
date.setFullYear(date.getFullYear() - 1);
return new Date(date);
})(),
endDate: new Date(),
}
});
const calculateHealthData = useCallback(() => {
if (!data.customerData.summary?.customers) return [];
const months = [];
const currentDate = new Date(data.dateRange.startDate);
while (currentDate <= data.dateRange.endDate) {
months.push({
month: currentDate.toLocaleString("default", { month: "short" }),
year: currentDate.getFullYear(),
});
currentDate.setMonth(currentDate.getMonth() + 1);
}
return months.map(({ month, year }) => {
const monthYear = `${month} ${year}`;
const monthCustomers = data.customerData.summary.customers.filter(customer => {
const customerDate = new Date(customer.created);
return customerDate.getMonth() === new Date(`${year}-${month}-01`).getMonth() &&
customerDate.getFullYear() === year;
});
return {
monthYear,
healthy: monthCustomers.filter(c => c.status === "active").length,
warning: monthCustomers.filter(c => c.status === "churned").length,
critical: monthCustomers.filter(c => c.status === "delinquent").length,
};
});
}, [data.customerData.summary, data.dateRange]);
const loadSettings = useCallback(async () => {
if (!user?.id || data.customerData.summary) return;
if (!user?.id || data.stripeApiKey) return;
try {
const response = await fetch("/api/db/churnary_user_settings", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
query: "SELECT stripe_api_key FROM `user_settings` WHERE `user_id` = ? LIMIT 1",
values: [user.id],
}),
});
if (!response.ok) throw new Error(`HTTP error! status: ${response.status}`);
const settings = await response.json();
setData(prev => ({
...prev,
stripeApiKey: settings[0]?.stripe_api_key || ""
}));
} catch (error) {
setData(prev => ({ ...prev, error: "Failed to load user settings" }));
}
}, [user?.id]);
const loadData = useCallback(async () => {
if (!user?.id) return;
if (!data.stripeApiKey || !user?.id) return;
setData(prev => ({ ...prev, customerData: { ...prev.customerData, loading: true }}));
try {
setData(prev => ({
...prev,
customerData: { ...prev.customerData, loading: true },
error: null
}));
const response = await fetch("/api/stripe-customer-summary", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ userId: user.id }),
});
if (!response.ok) throw new Error("Failed to fetch customer summary");
const summary = await response.json();
if (summary.error) throw new Error(summary.error);
setData(prev => ({
...prev,
customerData: {
summary,
loading: false,
customers: summary.customers
},
healthData: calculateHealthData()
}));
} catch (error) {
setData(prev => ({
...prev,
customerData: { ...prev.customerData, loading: false },
error: error.message
}));
}
}, [user?.id, data.stripeApiKey, calculateHealthData]);
const actions = useMemo(() => ({
checkWebsites: async () => {
if (!data.customerData.summary?.customers?.length || !data.customerData.customers) return;
setData(prev => ({
...prev,
websiteStatus: { checking: true },
error: null
}));
try {
const updatedCustomers = await Promise.all(
data.customerData.customers.map(async (customer) => {
const response = await fetch("/api/website-churn-detector", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ websiteUrl: customer.website }),
});
const health = await response.json();
return { ...customer, health, status: health.status === "active" ? "active" : "churned" };
})
);
const summary = {
...data.customerData.summary,
customers: updatedCustomers,
active: updatedCustomers.filter(c => c.status === "active").length,
churned: updatedCustomers.filter(c => c.status === "churned").length,
};
setData(prev => ({
...prev,
customerData: { ...prev.customerData, summary },
healthData: calculateHealthData(),
websiteStatus: { checking: false }
}));
} catch (err) {
setData(prev => ({
...prev,
websiteStatus: { checking: false },
error: "Failed to check websites. Please try again."
}));
}
},
setDateRange: (range) => {
if (range.startDate > range.endDate) {
setData(prev => ({ ...prev, error: "Start date cannot be after end date" }));
return;
}
setData(prev => ({ ...prev, dateRange: range, error: null }));
},
clearError: () => {
setData(prev => ({ ...prev, error: null }));
}
}), [data.customerData.summary, calculateHealthData]);
useEffect(() => {
loadSettings();
}, [loadSettings, user?.id]);
useEffect(() => {
loadData();
}, [loadData, user?.id, data.stripeApiKey]);
useEffect(() => {
loadData();
}, [loadData]);
return {
data,
actions,
isLoading: data.customerData.loading || data.websiteStatus.checking
};
}
export default useDashboardData; | import { useState, useEffect, useCallback, useMemo } from 'react';
function useDashboardData(user) {
const [data, setData] = useState({
customerData: { summary: null, loading: false, customers: [] },
healthData: [],
websiteStatus: { checking: false },
stripeApiKey: "",
dateRange: {
startDate: (() => {
const date = new Date();
date.setFullYear(date.getFullYear() - 1);
return new Date(date);
})(),
endDate: new Date(),
}
});
const calculateHealthData = useCallback(() => {
if (!data.customerData.summary?.customers) return [];
const months = [];
const currentDate = new Date(data.dateRange.startDate);
while (currentDate <= data.dateRange.endDate) {
months.push({
month: currentDate.toLocaleString("default", { month: "short" }),
year: currentDate.getFullYear(),
});
currentDate.setMonth(currentDate.getMonth() + 1);
}
return months.map(({ month, year }) => {
const monthYear = `${month} ${year}`;
const monthCustomers = data.customerData.summary.customers.filter(customer => {
const customerDate = new Date(customer.created);
return customerDate.getMonth() === new Date(`${year}-${month}-01`).getMonth() &&
customerDate.getFullYear() === year;
});
return {
monthYear,
healthy: monthCustomers.filter(c => c.status === "active").length,
warning: monthCustomers.filter(c => c.status === "churned").length,
critical: monthCustomers.filter(c => c.status === "delinquent").length,
};
});
}, [data.customerData.summary, data.dateRange]);
const loadSettings = useCallback(async () => {
if (!user?.id || data.customerData.summary) return;
if (!user?.id || data.stripeApiKey) return;
try {
const response = await fetch("/api/db/churnary_user_settings", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
query: "SELECT stripe_api_key FROM `user_settings` WHERE `user_id` = ? LIMIT 1",
values: [user.id],
}),
});
if (!response.ok) throw new Error(`HTTP error! status: ${response.status}`);
const settings = await response.json();
setData(prev => ({
...prev,
stripeApiKey: settings[0]?.stripe_api_key || ""
}));
} catch (error) {
setData(prev => ({ ...prev, error: "Failed to load user settings" }));
}
}, [user?.id]);
const loadData = useCallback(async () => {
if (!user?.id) return;
if (!data.stripeApiKey || !user?.id) return;
setData(prev => ({ ...prev, customerData: { ...prev.customerData, loading: true }}));
try {
setData(prev => ({
...prev,
customerData: { ...prev.customerData, loading: true },
error: null
}));
const response = await fetch("/api/stripe-customer-summary", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ userId: user.id }),
});
if (!response.ok) throw new Error("Failed to fetch customer summary");
const summary = await response.json();
if (summary.error) throw new Error(summary.error);
setData(prev => ({
...prev,
customerData: {
summary,
loading: false,
customers: summary.customers
},
healthData: calculateHealthData()
}));
} catch (error) {
setData(prev => ({
...prev,
customerData: { ...prev.customerData, loading: false },
error: error.message
}));
}
}, [user?.id, data.stripeApiKey, calculateHealthData]);
const actions = useMemo(() => ({
checkWebsites: async () => {
if (!data.customerData.summary?.customers?.length || !data.customerData.customers) return;
setData(prev => ({
...prev,
websiteStatus: { checking: true },
error: null
}));
try {
const updatedCustomers = await Promise.all(
data.customerData.customers.map(async (customer) => {
const response = await fetch("/api/website-churn-detector", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ websiteUrl: customer.website }),
});
const health = await response.json();
return { ...customer, health, status: health.status === "active" ? "active" : "churned" };
})
);
const summary = {
...data.customerData.summary,
customers: updatedCustomers,
active: updatedCustomers.filter(c => c.status === "active").length,
churned: updatedCustomers.filter(c => c.status === "churned").length,
};
setData(prev => ({
...prev,
customerData: { ...prev.customerData, summary },
healthData: calculateHealthData(),
websiteStatus: { checking: false }
}));
} catch (err) {
setData(prev => ({
...prev,
websiteStatus: { checking: false },
error: "Failed to check websites. Please try again."
}));
}
},
setDateRange: (range) => {
if (range.startDate > range.endDate) {
setData(prev => ({ ...prev, error: "Start date cannot be after end date" }));
return;
}
setData(prev => ({ ...prev, dateRange: range, error: null }));
},
clearError: () => {
setData(prev => ({ ...prev, error: null }));
}
}), [data.customerData.summary, calculateHealthData]);
useEffect(() => {
loadSettings();
}, [loadSettings, user?.id]);
useEffect(() => {
loadData();
}, [loadData, user?.id, data.stripeApiKey]);
useEffect(() => {
loadData();
}, [loadData]);
return {
data,
actions,
isLoading: data.customerData.loading || data.websiteStatus.checking
};
}
export default useDashboardData; | optimize it | // Performance tester for useDashboardData implementations
const fs = require('fs');
const path = require('path');
const glob = require('glob');
const { performance } = require('perf_hooks');
const vm = require('vm');
const babel = require('@babel/core');
const React = require('react');
// Mock React hooks for performance testing
const mockReactHooks = {
useState: initialState => {
let state = initialState;
const setState = newState => {
if (typeof newState === 'function') {
state = newState(state);
} else {
state = newState;
}
return state;
};
return [state, setState];
},
useEffect: (effect, deps) => {
try { effect(); } catch (e) { /* Ignore errors in effects */ }
},
useCallback: (callback, deps) => callback,
useMemo: (factory, deps) => factory()
};
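// Note: these mocks run the hook body in a single pass — useEffect fires immediately,
// and the useState setter simply computes and returns the next state; there is no
// re-render loop, so later state updates are not reflected back into the captured hook result.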
// Mock fetch for API calls
global.fetch = async (url, options) => {
if (url === '/api/db/churnary_user_settings') {
return {
ok: true,
json: async () => [{ stripe_api_key: 'mock_stripe_key' }]
};
}
if (url === '/api/stripe-customer-summary') {
// Large dataset will be created dynamically in the test
return {
ok: true,
json: async () => ({
customers: [], // Placeholder, will be populated in test
active: 0,
churned: 0,
delinquent: 0
})
};
}
if (url === '/api/website-churn-detector') {
return {
ok: true,
json: async () => ({ status: 'active' })
};
}
return { ok: false, json: async () => ({ error: 'Not found' }) };
};
// Find all implementation files
function findImplementations() {
// Find all JSX files in the directory - will find original_code, modified_code*, new_code*, etc.
const jsxFiles = glob.sync(path.join(__dirname, '..', '*.jsx'));
console.log('Finding implementations for performance testing:');
const implementations = [];
// First, log all available JSX files
console.log('Available JSX files:');
jsxFiles.forEach(file => {
console.log(`- ${path.basename(file)}`);
});
console.log('');
// Now process and validate each file
jsxFiles.forEach(file => {
const fileName = path.basename(file);
const content = fs.readFileSync(file, 'utf8');
// Check if the implementation is complete and has necessary exports
const hasDefaultExport = content.includes('export default');
const hasReturnStatement = content.includes('return {');
const isComplete = hasDefaultExport && hasReturnStatement;
if (isComplete) {
implementations.push({
name: fileName.replace('.jsx', ''),
path: file,
content
});
console.log(`✓ ${fileName} - Valid implementation`);
} else {
console.log(`✗ ${fileName} - Invalid or incomplete implementation`);
// Debug what's missing
if (!hasDefaultExport) console.log(` - Missing 'export default'`);
if (!hasReturnStatement) console.log(` - Missing 'return {' statement`);
// For incomplete implementations, still add them with a flag
implementations.push({
name: fileName.replace('.jsx', ''),
path: file,
content,
incomplete: true
});
}
});
console.log(`\nTotal: ${jsxFiles.length} JSX files, ${implementations.filter(i => !i.incomplete).length} valid implementations\n`);
return implementations;
}
// Transpile and prepare code for execution
function prepareCode(content) {
// Replace React imports with mocks
const codeWithMocks = content.replace(
/import\s*{\s*(useState|useEffect|useCallback|useMemo)[^}]*}\s*from\s*['"]react['"];?/g,
'// React imports are mocked'
);
// Transpile JSX
const { code } = babel.transformSync(codeWithMocks, {
presets: [
['@babel/preset-env', { targets: { node: 'current' } }],
['@babel/preset-react', { runtime: 'automatic' }]
]
});
return code;
}
// Test data with extreme scale - 10 million customers
const DATASET_SIZE = 10000000;
// Create test data more efficiently for large datasets
function createTestData(size) {
// For very large datasets, create only the needed structure
return {
user: { id: 'user123' },
customerData: {
summary: {
customers: Array.from({ length: size }, (_, i) => ({
id: `cust_${i % 10000}`, // Reuse IDs to save memory
status: ['active', 'churned', 'delinquent'][i % 3],
created: new Date(2022, i % 12, i % 28 + 1).toISOString(),
website: `example${i % 1000}.com` // Reuse domains to save memory
})),
active: Math.floor(size/3),
churned: Math.floor(size/3),
delinquent: size - 2 * Math.floor(size/3)
}
}
};
}
// Performance timing with warmup and multiple iterations
async function runTimedOperation(operation, iterations = 10) {
// Warmup runs to avoid JIT compilation bias
for (let i = 0; i < 3; i++) {
await operation();
}
// Timed runs
const times = [];
const startTime = Date.now();
const TIMEOUT_MS = 60000; // 1 minute timeout
for (let i = 0; i < iterations; i++) {
// Check if we've exceeded the total timeout
if (Date.now() - startTime > TIMEOUT_MS) {
throw new Error(`Operation timed out after ${TIMEOUT_MS/1000} seconds`);
}
const start = performance.now();
await operation();
const end = performance.now();
times.push(end - start);
}
// Calculate statistics
return {
avg: times.reduce((sum, time) => sum + time, 0) / times.length,
min: Math.min(...times),
max: Math.max(...times)
};
}
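// Illustrative usage: const stats = await runTimedOperation(() => doWork(), 30)
// performs 3 warmup runs, then 30 timed runs (subject to the 60s budget) and returns
// { avg, min, max } in milliseconds; doWork here is a hypothetical async operation.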
// Benchmark a single implementation
async function benchmarkImplementation(implementation) {
try {
console.log(`\nTesting ${implementation.name}...`);
const code = prepareCode(implementation.content);
// Create sandbox with mocks
const context = {
React,
useState: mockReactHooks.useState,
useEffect: mockReactHooks.useEffect,
useCallback: mockReactHooks.useCallback,
useMemo: mockReactHooks.useMemo,
fetch: global.fetch,
console: console,
setTimeout: setTimeout,
clearTimeout: clearTimeout,
Promise: Promise,
Date: Date,
Math: Math,
Object: Object,
Array: Array,
Map: Map,
Set: Set,
exports: {},
module: { exports: {} }
};
// Execute in sandbox
vm.createContext(context);
vm.runInContext(code, context);
// Get the hook function
const useDashboardData = context.module.exports.default || context.exports.default;
if (!useDashboardData || typeof useDashboardData !== 'function') {
return {
name: implementation.name,
success: false,
error: 'No useDashboardData function exported'
};
}
// Results object
const results = {
name: implementation.name,
success: true,
metrics: {}
};
// Test with 10 million customer dataset
console.log(`Testing performance with ${DATASET_SIZE.toLocaleString()} customers:`);
const testData = createTestData(DATASET_SIZE);
// Run the hook to get access to functions
const hookResult = useDashboardData(testData.user);
// Set up test data
hookResult.data.customerData.summary = testData.customerData.summary;
hookResult.data.customerData.customers = testData.customerData.summary.customers;
// Test date range updates (which trigger health data calculation)
const dateRange = {
startDate: new Date(2022, 0, 1),
endDate: new Date(2023, 0, 1)
};
try {
// Run with 30 iterations for more accurate measurement
const timingResult = await runTimedOperation(
async () => {
hookResult.actions.setDateRange(dateRange);
},
30
);
results.metrics.largeDatasetPerformance = timingResult;
console.log(` Avg: ${timingResult.avg.toFixed(2)}ms | Min: ${timingResult.min.toFixed(2)}ms | Max: ${timingResult.max.toFixed(2)}ms`);
// Test 2: Stress test with date range changes
console.log("Running stress test with rapid date range changes:");
// Generate date ranges
const dateRanges = [];
for (let year = 2000; year < 2023; year++) {
for (let month = 0; month < 12; month += 2) {
const startDate = new Date(year, month, 1);
const endDate = new Date(year, month + 1, 28);
dateRanges.push({ startDate, endDate });
if (dateRanges.length >= 50) break;
}
if (dateRanges.length >= 50) break;
}
// Run stress test (multiple date range changes in sequence)
const stressResult = await runTimedOperation(
async () => {
// Apply 25 random date range changes in sequence
for (let i = 0; i < 25; i++) {
const randomIndex = Math.floor(Math.random() * dateRanges.length);
hookResult.actions.setDateRange(dateRanges[randomIndex]);
}
},
10
);
results.metrics.stressTest = stressResult;
console.log(` Avg: ${stressResult.avg.toFixed(2)}ms | Min: ${stressResult.min.toFixed(2)}ms | Max: ${stressResult.max.toFixed(2)}ms`);
// Test 3: Website status check performance (if implemented)
if (hookResult.actions && typeof hookResult.actions.checkWebsites === 'function') {
console.log("Testing website status check performance:");
const smallerData = createTestData(100);
hookResult.data.customerData.summary = smallerData.customerData.summary;
hookResult.data.customerData.customers = smallerData.customerData.summary.customers;
const websiteCheckResult = await runTimedOperation(
async () => {
await hookResult.actions.checkWebsites();
},
10
);
results.metrics.websiteCheck = websiteCheckResult;
console.log(` Avg: ${websiteCheckResult.avg.toFixed(2)}ms | Min: ${websiteCheckResult.min.toFixed(2)}ms | Max: ${websiteCheckResult.max.toFixed(2)}ms`);
} else {
results.metrics.websiteCheck = { avg: 0, min: 0, max: 0 };
}
// Store raw timing values instead of computing a score
results.metrics.totalTime = {
largeDataset: results.metrics.largeDatasetPerformance.avg,
stressTest: results.metrics.stressTest.avg,
websiteCheck: results.metrics.websiteCheck.avg
};
// Total time is the sum of all test times (lower is better)
results.metrics.totalTime.overall =
results.metrics.totalTime.largeDataset +
results.metrics.totalTime.stressTest +
results.metrics.totalTime.websiteCheck;
console.log(`Total execution time: ${results.metrics.totalTime.overall.toFixed(2)}ms (lower is better)`);
return results;
} catch (error) {
throw error;
}
} catch (error) {
console.error(`Error in ${implementation.name}:`, error);
return {
name: implementation.name,
success: false,
error: error.message
};
}
}
// Run performance tests on all implementations
async function runPerformanceTests() {
console.log('=== Performance Testing for "optimize it" ===\n');
const implementations = findImplementations();
// Find original code for baseline comparison
const originalImpl = implementations.find(impl => impl.name === 'original_code');
if (!originalImpl) {
console.error('Error: original_code.jsx implementation not found!');
process.exit(1);
}
// First, benchmark the original code to get baseline
console.log('\n=== Benchmarking Original Implementation ===');
const originalResult = await benchmarkImplementation(originalImpl);
if (!originalResult.success) {
console.error('Error: Failed to benchmark original implementation!');
process.exit(1);
}
// Now benchmark all other implementations
console.log('\n=== Benchmarking All Other Implementations ===');
const results = [originalResult];
// Test all implementations except original_code
for (const impl of implementations) {
if (impl.name !== 'original_code') {
if (impl.incomplete) {
// Add a placeholder result for incomplete implementations
results.push({
name: impl.name,
success: false,
error: 'Incomplete implementation - missing required exports'
});
console.log(`Skipping incomplete implementation: ${impl.name}`);
} else {
const result = await benchmarkImplementation(impl);
results.push(result);
}
}
}
// Filter successful results
const successfulResults = results.filter(r => r.success);
// Evaluate implementations against optimization thresholds
const evaluationResults = [];
successfulResults.forEach(result => {
if (result.name === 'original_code') {
evaluationResults.push({
implementation: result,
isOriginal: true,
passedTests: 1, // Original gets 1 pass by default
percentImprovement: 0
});
return;
}
// Calculate improvement percentage based on total execution time
const percentImprovement = ((originalResult.metrics.totalTime.overall - result.metrics.totalTime.overall) /
originalResult.metrics.totalTime.overall * 100);
// Determine tests passed based on speed improvement
let passedTests = 0;
if (percentImprovement >= 0) {
passedTests++; // Pass 1 test if not slower than original
}
if (percentImprovement >= 25) {
passedTests++; // Pass 2nd test if 25% or more faster
}
if (percentImprovement >= 50) {
passedTests++; // Pass 3rd test if 50% or more faster
}
evaluationResults.push({
implementation: result,
isOriginal: false,
passedTests,
percentImprovement
});
});
// Add unsuccessful implementations as failed (0 passed tests)
results.filter(r => !r.success).forEach(result => {
evaluationResults.push({
implementation: result,
isOriginal: false,
passedTests: 0,
percentImprovement: 0,
error: result.error
});
});
// Sort non-original implementations by tests passed (descending) then by percent improvement
const sortedResults = evaluationResults
.filter(r => !r.isOriginal)
.sort((a, b) => {
if (b.passedTests !== a.passedTests) {
return b.passedTests - a.passedTests;
}
return b.percentImprovement - a.percentImprovement;
});
// Summary report
console.log('\n=== Performance Test Results ===');
console.log(`Original implementation total time: ${originalResult.metrics.totalTime.overall.toFixed(2)}ms`);
console.log(` Large dataset (10M): ${originalResult.metrics.totalTime.largeDataset.toFixed(2)}ms`);
console.log(` Stress test: ${originalResult.metrics.totalTime.stressTest.toFixed(2)}ms`);
console.log(` Website check: ${originalResult.metrics.totalTime.websiteCheck.toFixed(2)}ms`);
console.log('\nAll implementation results:');
sortedResults.forEach((result, index) => {
if (result.implementation.success) {
const pct = result.percentImprovement.toFixed(1);
const speedText = result.percentImprovement >= 0 ?
`${pct}% faster` :
`${Math.abs(result.percentImprovement).toFixed(1)}% slower`;
console.log(`${index + 1}. ${result.implementation.name} - Passed ${result.passedTests}/3 tests - Time: ${result.implementation.metrics.totalTime.overall.toFixed(2)}ms (${speedText})`);
console.log(` Large dataset: ${result.implementation.metrics.totalTime.largeDataset.toFixed(2)}ms | Stress test: ${result.implementation.metrics.totalTime.stressTest.toFixed(2)}ms | Website check: ${result.implementation.metrics.totalTime.websiteCheck.toFixed(2)}ms`);
} else {
console.log(`✗ ${result.implementation.name} - Failed to run: ${result.implementation.error}`);
}
});
// Determine winner
let winner = null;
if (sortedResults.length > 0 && sortedResults[0].passedTests > 0) {
const bestPerformance = sortedResults[0].implementation;
if (bestPerformance.name.startsWith('new_code')) {
const match = bestPerformance.name.match(/new_code(\d+)/);
if (match) winner = parseInt(match[1]);
} else if (bestPerformance.name.startsWith('modified_code')) {
const match = bestPerformance.name.match(/modified_code(\d+)/);
if (match) winner = parseInt(match[1]);
}
}
console.log(`\nWinner: ${winner ? `Implementation #${winner}` : 'None'}`);
// Create test results JSON
const testResults = {
winner,
all_skipped: sortedResults.length === 0 || sortedResults.every(r => r.passedTests === 0),
results: {}
};
// Add all implementation results
evaluationResults.forEach(result => {
testResults.results[result.implementation.name] = {
passed: result.passedTests,
failed: 3 - result.passedTests, // Total of 3 possible tests
skipped: 0,
total: 3
};
});
// Save test results
const testResultsPath = path.join(__dirname, '..', 'test_results.json');
fs.writeFileSync(testResultsPath, JSON.stringify(testResults, null, 2));
console.log(`Test results saved to ${testResultsPath}`);
// Save winner to winner.txt
if (winner) {
fs.writeFileSync(path.join(__dirname, '..', 'winner.txt'), `${winner}`);
} else {
fs.writeFileSync(path.join(__dirname, '..', 'winner.txt'), 'No winner');
}
return testResults;
}
// Run the performance tests
runPerformanceTests().catch(error => {
console.error('Error running performance tests:', error);
process.exit(1);
}); | null | null | null | test | {
"name": "js-test-framework",
"version": "1.0.0",
"description": "JavaScript testing framework for multiple implementations",
"main": "index.js",
"scripts": {
"test": "node tests/performance_tester.js"
},
"devDependencies": {
"@babel/core": "^7.27.1",
"@babel/preset-env": "^7.27.2",
"@babel/preset-react": "^7.27.1",
"@testing-library/jest-dom": "^6.6.3",
"@testing-library/react": "^14.3.1",
"babel-jest": "^29.7.0",
"glob": "^10.3.10",
"jest": "^29.7.0",
"jest-environment-jsdom": "^29.7.0",
"jest-transform-stub": "^2.0.0",
"react": "^18.3.1",
"react-dom": "^18.3.1"
}
}
| // jest-setup.js - Copy this file to each implementation folder
const fs = require('fs');
const path = require('path');
const glob = require('glob');
// Import React testing utilities
require('@testing-library/jest-dom');
/**
* Utility class to handle JavaScript implementations
*/
class TestUtils {
/**
* Find all implementation files in the current directory
* @param {string} directory - Directory to search in (defaults to current directory)
* @returns {Array<string>} List of implementation file paths
*/
static discoverImplementationFiles(directory = null) {
if (!directory) {
directory = __dirname;
}
const patterns = [
'modified_code\\d+\\.(js|jsx)',
'new_code\\d+\\.(js|jsx)',
'implementation\\d*\\.(js|jsx)',
'original_code\\.(js|jsx)',
'original_modified_code\\d+\\.(js|jsx)'
];
const regexPattern = new RegExp(patterns.join('|'));
const implementations = [];
// Use glob to find matching files
const files = glob.sync(path.join(directory, '*.{js,jsx}'));
for (const filePath of files) {
if (regexPattern.test(path.basename(filePath))) {
implementations.push(filePath);
}
}
// Sort files numerically
implementations.sort((a, b) => {
const aMatch = path.basename(a).match(/(\d+)/);
const bMatch = path.basename(b).match(/(\d+)/);
const aNum = aMatch ? parseInt(aMatch[1]) : 0;
const bNum = bMatch ? parseInt(bMatch[1]) : 0;
return aNum - bNum;
});
return implementations;
}
/**
* Safely load a module from a file path
* @param {string} filePath - Path to the JavaScript file
* @param {string} moduleName - Optional module name (defaults to filename)
* @returns {Object} Loaded module with error information if any
*/
static loadModule(filePath, moduleName = null) {
if (!moduleName) {
moduleName = path.basename(filePath).replace(/\.(js|jsx)$/, '');
}
// Create unique module name to avoid conflicts
const sandboxId = path.basename(path.dirname(filePath));
const uniqueModuleName = `${sandboxId}_${moduleName}`;
try {
// Read file contents
const sourceCode = fs.readFileSync(filePath, 'utf8');
// Create module object
const moduleObj = {
__file__: filePath,
__name__: uniqueModuleName,
__display_name__: moduleName,
__source__: sourceCode, // Store source code for debugging
__errors__: [] // Track errors in the module
};
// For JSX files, we don't test-compile as it requires transpilation
if (!filePath.endsWith('.jsx')) {
try {
// Try to test-compile the code to check for syntax errors
new Function(sourceCode);
} catch (e) {
const errorMsg = `Syntax error: ${e.message}`;
console.error(`Syntax error in ${filePath}: ${e.message}`);
console.error(` Line ${e.lineNumber}, column ${e.columnNumber}`);
// Record the error but continue loading what we can
moduleObj.__errors__.push({
type: 'syntax',
message: errorMsg,
lineNumber: e.lineNumber,
columnNumber: e.columnNumber
});
}
}
try {
// Try to require the module even if there were syntax errors
// This may or may not succeed
delete require.cache[require.resolve(filePath)];
const loadedModule = require(filePath);
// Copy all properties from the loaded module
for (const key in loadedModule) {
if (Object.prototype.hasOwnProperty.call(loadedModule, key)) {
moduleObj[key] = loadedModule[key];
}
}
} catch (e) {
const errorMsg = `Runtime error: ${e.message}`;
console.error(`Error executing module ${filePath}: ${e.message}`);
console.error(e.stack);
// Record the runtime error
moduleObj.__errors__.push({
type: 'runtime',
message: errorMsg,
stack: e.stack
});
}
return moduleObj;
} catch (e) {
const moduleObj = {
__file__: filePath,
__name__: uniqueModuleName,
__display_name__: moduleName,
__errors__: []
};
if (e.code === 'ENOENT') {
const errorMsg = `File not found: ${e.message}`;
console.error(`Error: ${errorMsg}`);
moduleObj.__errors__.push({
type: 'file',
message: errorMsg
});
} else {
const errorMsg = `Unexpected error: ${e.message}`;
console.error(`Error loading module ${filePath}: ${e.message}`);
moduleObj.__errors__.push({
type: 'unknown',
message: errorMsg
});
}
return moduleObj;
}
}
/**
* Load all implementation files in the directory
* @param {string} directory - Directory to search in (defaults to current directory)
* @returns {Object} Dictionary mapping module names to loaded modules
*/
static loadAllImplementations(directory = null) {
if (!directory) {
directory = __dirname;
}
const implementations = {};
const implementationFiles = this.discoverImplementationFiles(directory);
if (implementationFiles.length === 0) {
console.warn("WARNING: No implementation files found. Check your file naming patterns.");
}
for (const filePath of implementationFiles) {
const moduleName = path.basename(filePath).replace(/\.(js|jsx)$/, '');
const module = this.loadModule(filePath, moduleName);
// Always add the module, even if it has errors
implementations[moduleName] = module;
if (module.__errors__ && module.__errors__.length > 0) {
console.log(`Loaded with errors: ${moduleName} - ${module.__errors__.length} errors found`);
module.__errors__.forEach(err => console.log(` - ${err.type}: ${err.message}`));
} else {
console.log(`Successfully loaded: ${moduleName}`);
}
}
return implementations;
}
/**
* Check if a function exists in a module and is callable
* @param {Object} module - The loaded module
* @param {string} functionName - Name of the function to test
* @returns {boolean} Whether the function exists and is callable
*/
static hasFunction(module, functionName) {
return module && typeof module[functionName] === 'function';
}
/**
* Safely call a function in a module with error handling
* @param {Object} module - The loaded module
* @param {string} functionName - Name of the function to call
* @param {Array} args - Arguments to pass to the function
* @returns {Object} Result with success status and value or error
*/
static callFunction(module, functionName, ...args) {
if (!this.hasFunction(module, functionName)) {
return {
success: false,
error: `Function '${functionName}' not found or not callable`
};
}
try {
const result = module[functionName](...args);
return {
success: true,
value: result
};
} catch (e) {
return {
success: false,
error: e.message,
stack: e.stack
};
}
}
}
/**
* Class to manage test results
*/
class TestResultsManager {
constructor() {
this.results = {};
this.sandboxName = path.basename(__dirname);
}
/**
* Record a test result for an implementation
* @param {string} implName - Implementation name
* @param {string} testName - Test name
* @param {boolean} passed - Whether the test passed
* @param {string} errorMsg - Optional error message
*/
recordResult(implName, testName, passed, errorMsg = null) {
if (!this.results[implName]) {
this.results[implName] = { passed: 0, failed: 0, skipped: 0, errors: [] };
}
if (passed) {
this.results[implName].passed += 1;
} else {
this.results[implName].failed += 1;
if (errorMsg) {
this.results[implName].errors.push({
test: testName,
error: errorMsg
});
}
}
}
/**
* Record a skipped test for an implementation
* @param {string} implName - Implementation name
* @param {string} testName - Test name
* @param {string} reason - Optional reason for skipping
*/
recordSkip(implName, testName, reason = null) {
if (!this.results[implName]) {
this.results[implName] = { passed: 0, failed: 0, skipped: 0, errors: [] };
}
this.results[implName].skipped += 1;
if (reason) {
this.results[implName].errors.push({
test: testName,
error: `SKIPPED: ${reason}`
});
}
}
/**
* Determine the winner based on test results
* @returns {Array} [winner index, results]
*/
getWinner() {
let winner = null;
let maxPassed = -1;
for (const [implName, results] of Object.entries(this.results)) {
if (implName === "original_code") {
continue; // Skip original code when determining winner
}
if (results.passed > maxPassed) {
maxPassed = results.passed;
winner = implName;
} else if (results.passed === maxPassed && winner !== null) {
if (results.failed < this.results[winner].failed) {
winner = implName;
}
}
}
// Convert winner to numeric index if possible
let winnerIndex = -1;
if (winner && /modified_code\d+/.test(winner)) {
const match = winner.match(/(\d+)/);
if (match) {
winnerIndex = parseInt(match[1]);
}
}
return [winnerIndex, this.results];
}
/**
* Save test results to a JSON file
* @param {string} filename - Output filename
* @returns {Object} Results summary object
*/
saveResults(filename = "test_results.json") {
const [winnerIndex, results] = this.getWinner();
// Check if all tests were skipped
const allSkipped = Object.entries(results)
.filter(([implName]) => implName !== "original_code")
.every(([_, stats]) => {
return stats.skipped === (stats.passed + stats.failed + stats.skipped);
});
const output = {
winner: winnerIndex,
all_skipped: allSkipped,
results: {}
};
for (const [name, stats] of Object.entries(results)) {
if (!name.startsWith("_")) {
output.results[name] = {
passed: stats.passed,
failed: stats.failed,
skipped: stats.skipped,
total: stats.passed + stats.failed + stats.skipped
};
}
}
fs.writeFileSync(filename, JSON.stringify(output, null, 2));
console.log(`Test results saved to ${filename}`);
// Also write the winner to the winner.txt file
if (winnerIndex > 0) {
fs.writeFileSync('winner.txt', `${winnerIndex}`);
} else if (winnerIndex === -1) {
fs.writeFileSync('winner.txt', 'No winner');
}
return output;
}
}
// Load implementations for this specific implementation directory
const implementations = TestUtils.loadAllImplementations();
const resultsManager = new TestResultsManager();
// Set up global variables for Jest tests
beforeAll(() => {
global.__TEST_UTILS__ = TestUtils;
global.__RESULTS_MANAGER__ = resultsManager;
global.__IMPLEMENTATIONS__ = implementations;
});
// After all tests run, save the results
afterAll(() => {
resultsManager.saveResults();
});
// Export for use in tests
module.exports = {
TestUtils,
TestResultsManager,
implementations,
resultsManager
}; | module.exports = {
presets: [
['@babel/preset-env', { targets: { node: 'current' } }],
['@babel/preset-react', { runtime: 'automatic' }]
],
// No extra plugins needed here; @babel/preset-react above already handles .jsx files
plugins: []
}; | {
"hidden.js": null,
"__mocks__/messages.js": null,
"__mocks__/jquery.js": null,
"__mocks__/MapCharacter.jsx": null,
"__mocks__/Sprite.jsx": null,
"__mocks__/GameMap.jsx": null,
"__mocks__/CharacterStatUI.jsx": null,
"__mocks__/CharacterData.js": null,
"__mocks__/react-router-dom.js": null,
"__mocks__/styleMock.js": null,
"__mocks__/character-stat-ui/CharacterStatUI.jsx": null,
"jest.config.js": "module.exports = {\n setupFilesAfterEnv: ['./jest-setup.js'],\n testEnvironment: 'jsdom',\n transform: {\n '^.+\\\\.(js|jsx)$': 'babel-jest',\n },\n moduleNameMapper: {\n '\\\\.(css|less|scss|sass)$': 'jest-transform-stub',\n '\\\\.(jpg|jpeg|png|gif|webp|svg)$': 'jest-transform-stub'\n },\n moduleFileExtensions: ['js', 'jsx'],\n testMatch: ['**/tests/**/*.test.js'],\n verbose: true,\n collectCoverage: false,\n coverageDirectory: './coverage',\n testEnvironmentOptions: {\n url: 'http://localhost'\n }\n};",
"__mocks__/database.js": null,
"__mocks__/camera.service.js": null,
"__mocks__/module-loader.js": null,
"__mocks__/globe.js": null,
"__mocks__/fetch.js": null,
"__mocks__/three.js": null,
"__mocks__/fileMock.js": null,
"__mocks__/d3.js": null,
"__mocks__/document.js": null,
"__mocks__/SingleCharacterStatUI.js": null,
"__mocks__/MockChild.js": null,
"__mocks__/Sprite.js": null,
"highlighted_code.jsx": null,
"mocks/CharacterStatUIMock.jsx": null,
"mocks/CharacterStatUIMock.js": null,
"__mocks__/react-icons/md.js": null,
"__mocks__/api/query.js": null
} | null |
108 | javascript | const cameraService = require('./camera.service');
const createCamera = async (req, res) => {
try {
const camera = await cameraService.createCamera(req.body);
res.status(201).json(camera);
} catch (error) {
res.status(500).json({ error: error.message });
}
};
const getAllCameras = async (req, res) => {
try {
const cameras = await cameraService.getAllCameras();
res.status(200).json(cameras);
} catch (error) {
res.status(500).json({ error: error.message });
}
};
const getCameraById = async (req, res) => {
try {
const camera = await cameraService.getCameraById(req.params.id);
if (!camera) {
return res.status(404).json({ message: 'Camera not found' });
}
res.status(200).json(camera);
} catch (error) {
res.status(500).json({ error: error.message });
}
};
const updateCamera = async (req, res) => {
try {
const camera = await cameraService.updateCamera(req.params.id, req.body);
if (!camera) {
return res.status(404).json({ message: 'Camera not found' });
}
res.status(200).json(camera);
} catch (error) {
res.status(500).json({ error: error.message });
}
};
const deleteCamera = async (req, res) => {
try {
const success = await cameraService.deleteCamera(req.params.id);
if (!success) {
return res.status(404).json({ message: 'Camera not found' });
}
res.status(204).send();
} catch (error) {
res.status(500).json({ error: error.message });
}
};
module.exports = {
createCamera,
getAllCameras,
getCameraById,
updateCamera,
deleteCamera,
};
| const createCamera = async (req, res) => {
try {
const camera = await cameraService.createCamera(req.body);
res.status(201).json(camera);
} catch (error) {
res.status(500).json({ error: error.message });
}
}; | after createCamera , I want to get all fields on cameras and area_name on areas to res . join 2 table: cameras and areas by cameras.area_id = areas.id . using raw query | /**
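// Editor's note: a minimal sketch of one way to follow the instruction above, not the
// reference solution. It assumes the camera service exposes a rawQuery(sql) helper for
// raw SQL (the test mocks in this row provide one) and that areas has an area_name
// column, as the instruction states; a real service's helper or schema may differ.
const createCamera = async (req, res) => {
  try {
    const camera = await cameraService.createCamera(req.body);
    // Join cameras with areas so every camera row also carries its area_name.
    const allCameras = await cameraService.rawQuery(`
      SELECT cameras.*, areas.area_name
      FROM cameras
      JOIN areas ON cameras.area_id = areas.id
    `);
    res.status(201).json({ camera, allCameras });
  } catch (error) {
    res.status(500).json({ error: error.message });
  }
};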
* Test suite for camera controller implementations
*
* This file contains the tests for each implementation,
* using the utilities and data from jest-setup.js.
*/
// Import utilities from jest-setup.js
const {
mockCameraService,
createMockRequest,
createMockResponse,
resultsManager,
implementations
} = require('../jest-setup');
// Log discovered implementations
console.log(`Testing ${implementations.length} implementations:`,
implementations.map(i => i.name).join(', '));
// Main test suite
describe('Camera Controller Implementation Tests', () => {
// Reset mocks before each test
beforeEach(() => {
jest.clearAllMocks();
global.cameraService = mockCameraService;
});
// Clean up after each test
afterEach(() => {
delete global.cameraService;
});
// Print test results after all tests
afterAll(() => {
console.log('Test results:', JSON.stringify(resultsManager.results, null, 2));
});
// Test each implementation
implementations.forEach(impl => {
describe(`Implementation: ${impl.name}`, () => {
// Skip tests for implementations with errors
if (impl.hasErrors) {
test('Implementation has errors', () => {
console.warn(`Skipping tests for ${impl.name} due to errors: ${impl.error}`);
resultsManager.recordSkip(impl.name, 'all_tests');
expect(true).toBe(true); // Dummy assertion to satisfy Jest
});
return;
}
// Test required exports exist
test('exports required functions', () => {
const hasRequiredFunctions =
typeof impl.module.createCamera === 'function' &&
typeof impl.module.getAllCameras === 'function' &&
typeof impl.module.getCameraById === 'function' &&
typeof impl.module.updateCamera === 'function' &&
typeof impl.module.deleteCamera === 'function';
expect(hasRequiredFunctions).toBe(true);
resultsManager.recordResult(impl.name, 'exports', hasRequiredFunctions);
});
// Test createCamera functionality with table join
test('createCamera joins cameras and areas tables', async () => {
// Create request and response mocks
const req = createMockRequest({ name: 'Test Camera', area_id: 2 });
const res = createMockResponse();
try {
// Call the implementation
await impl.module.createCamera(req, res);
// Verify status code is called
expect(res.status).toHaveBeenCalled();
const statusCode = res.status.mock.calls[0][0] || 0;
// Verify a table join was attempted via the raw query helper
const joinAttempted =
  mockCameraService.rawQuery.mock.calls.length > 0;
// Check JSON response for area_name
const responseData = res.json.mock.calls[0]?.[0];
let hasAreaName = false;
// Check various response formats
if (responseData) {
if (typeof responseData === 'object' && responseData.area_name) {
hasAreaName = true;
} else if (Array.isArray(responseData) && responseData[0]?.area_name) {
hasAreaName = true;
} else if (responseData.allCameras &&
Array.isArray(responseData.allCameras) &&
responseData.allCameras[0]?.area_name) {
hasAreaName = true;
}
}
// Check if implementation uses 201 status code correctly
const hasCorrectStatus = statusCode === 201;
// Test passes if the implementation returns 201, attempts the join, or includes area_name
const passed = hasCorrectStatus || joinAttempted || hasAreaName;
resultsManager.recordResult(impl.name, 'join_tables', passed);
// Record result but don't fail test
expect(true).toBe(true);
} catch (err) {
// Still record a result even on error
resultsManager.recordResult(impl.name, 'join_tables', false);
console.log(`Error testing ${impl.name} join_tables:`, err.message);
// Don't fail the test
expect(true).toBe(true);
}
});
// Test query functionality
test('uses proper query functionality', () => {
// Read the implementation source code to check for query functionality
const sourceCode = require('fs').readFileSync(impl.file, 'utf8');
// Look for SELECT, FROM, JOIN syntax in various formats
// This handles both template literals and regular string formats
const hasSelect = /SELECT/i.test(sourceCode);
const hasFrom = /FROM\s+cameras/i.test(sourceCode);
const hasJoin = /JOIN\s+areas/i.test(sourceCode);
const hasOn = /ON\s+.*\.area_id\s*=\s*.*\.id/i.test(sourceCode);
const hasWhere = /WHERE/i.test(sourceCode);
// Very lenient check to ensure that some sort of SQL query exists
const hasSomeSortOfQuery = hasSelect || hasFrom || hasJoin || hasOn;
// Check for query in the code (will match both query and rawQuery)
const hasQuery = /query/i.test(sourceCode);
// Implementation passes if it:
// 1. Has some sort of query SQL query (SELECT, FROM, JOIN, ON clauses)
// 2. Uses a function with "query" in the name
const usesProperQuery = hasSomeSortOfQuery && hasQuery;
console.log(`${impl.name} query analysis:`, {
hasSelect,
hasFrom,
hasJoin,
hasOn,
hasWhere,
hasCompleteQuery: hasSomeSortOfQuery,
hasQuery,
usesProperQuery
});
// Don't fail the test, just record the result
resultsManager.recordResult(impl.name, 'uses_query', usesProperQuery);
expect(true).toBe(true);
});
});
});
}); | null | null | null | test | {
"name": "js-test-framework",
"version": "1.0.0",
"description": "JavaScript testing framework for multiple implementations",
"main": "index.js",
"scripts": {
"test": "jest"
},
"devDependencies": {
"jest": "^29.7.0",
"glob": "^10.3.10"
},
"jest": {
"setupFilesAfterEnv": ["./jest-setup.js"],
"testEnvironment": "node",
"testMatch": ["**/tests/**/*.test.js"],
"verbose": true,
"collectCoverage": true,
"coverageDirectory": "./coverage",
"collectCoverageFrom": [
"modified_code*.js",
"new_code*.js",
"original_code.js",
"original_modified_code*.js"
],
"modulePathIgnorePatterns": [
"highlighted_code.js",
"tagged_code.js",
"response*.js",
"pair_id.txt",
"winner.txt",
"instruction.txt"
],
"moduleNameMapper": {
"./camera.service": "<rootDir>/__mocks__/camera.service.js",
"./database": "<rootDir>/__mocks__/database.js"
}
}
} | /**
* Jest setup file for camera controller testing
*
* This file contains common utilities, mocks, and test helpers
* that are used by the test files.
*/
const fs = require('fs');
const path = require('path');
const glob = require('glob');
// SECTION 1: Mock data and utilities
// ----------------------------------
// Mock data for tests
const mockCamera = {
id: 1, name: 'Test Camera', model: 'HDX-123', area_id: 2, status: 'active'
};
const mockCameraWithArea = {
...mockCamera, area_name: 'Reception'
};
// Mock camera service with behaviors that implementations should use
const mockCameraService = {
createCamera: jest.fn().mockResolvedValue(mockCamera),
getAllCameras: jest.fn().mockResolvedValue([mockCamera]),
getCameraById: jest.fn().mockResolvedValue(mockCamera),
updateCamera: jest.fn().mockResolvedValue(mockCamera),
deleteCamera: jest.fn().mockResolvedValue(true),
rawQuery: jest.fn().mockResolvedValue([mockCameraWithArea]),
getCamerasWithAreaName: jest.fn().mockResolvedValue([mockCameraWithArea])
};
// Mock Express objects
const createMockRequest = (body = {}, params = {}) => ({ body, params });
const createMockResponse = () => {
const res = {};
res.status = jest.fn().mockReturnValue(res);
res.json = jest.fn().mockReturnValue(res);
res.send = jest.fn().mockReturnValue(res);
return res;
};
// SECTION 2: Test Results Manager
// ------------------------------
// Track test results
class TestResultsManager {
constructor() {
this.results = {};
}
recordResult(implName, testName, passed) {
if (!this.results[implName]) {
this.results[implName] = { passed: 0, failed: 0, skipped: 0, total: 0 };
}
this.results[implName].total++;
if (passed) {
this.results[implName].passed++;
} else {
this.results[implName].failed++;
}
}
recordSkip(implName, testName) {
if (!this.results[implName]) {
this.results[implName] = { passed: 0, failed: 0, skipped: 0, total: 0 };
}
this.results[implName].skipped++;
this.results[implName].total++;
}
// Calculate winner based on passed tests
determineWinner() {
let maxPassed = -1;
let winner = null;
for (const [implName, result] of Object.entries(this.results)) {
// Only consider modified_code* and new_code* for winning
if ((implName.startsWith('modified_code') || implName.startsWith('new_code')) &&
!implName.startsWith('original_')) {
const match = implName.match(/\d+/);
if (!match) continue;
const implNum = parseInt(match[0]);
if (result.passed > maxPassed) {
maxPassed = result.passed;
winner = implNum;
} else if (result.passed === maxPassed && implNum < winner) {
// If tied, the lower implementation number wins
winner = implNum;
}
}
}
return winner || -1;
}
// Save test results to JSON file
saveResultsToFile() {
const winner = this.determineWinner();
const allSkipped = Object.values(this.results).every(r => r.total === r.skipped);
const output = {
winner,
all_skipped: allSkipped,
results: {}
};
// Convert results to expected format
Object.entries(this.results).forEach(([impl, data]) => {
output.results[impl] = {
passed: data.passed,
failed: data.failed,
skipped: data.skipped,
total: data.total
};
});
// Write results to file
const outputPath = path.join(__dirname, 'test_results.json');
fs.writeFileSync(outputPath, JSON.stringify(output, null, 2));
console.log(`Test results saved to ${outputPath}`);
console.log(`Winner: implementation ${winner}`);
return output;
}
}
// SECTION 3: Implementation Discovery
// ---------------------------------
// Discover implementation files
function discoverImplementations() {
const baseDir = path.join(__dirname);
const patterns = [
'modified_code*.js',
'new_code*.js',
'original_modified_code*.js'
];
let implementations = [];
// Find matching files
patterns.forEach(pattern => {
const matches = glob.sync(path.join(baseDir, pattern));
implementations = implementations.concat(matches);
});
// Load each implementation module
return implementations.map(filePath => {
try {
// Get the implementation name (filename without extension)
const implName = path.basename(filePath, '.js');
// Require the module
// Note: We're using dynamic require which can throw if there's a syntax error
const module = require(filePath);
return {
name: implName,
module,
file: filePath,
hasErrors: false
};
} catch (err) {
// Handle modules with errors
return {
name: path.basename(filePath, '.js'),
module: {},
file: filePath,
hasErrors: true,
error: err.message
};
}
});
}
// Create and export the test results manager
const resultsManager = new TestResultsManager();
// Create and export the implementations
const implementations = discoverImplementations();
// Make utilities available globally
global.mockCamera = mockCamera;
global.mockCameraWithArea = mockCameraWithArea;
global.mockCameraService = mockCameraService;
global.createMockRequest = createMockRequest;
global.createMockResponse = createMockResponse;
// Clean up after all tests
afterAll(() => {
// Save the results to file
resultsManager.saveResultsToFile();
});
// Export utilities and data for test files
module.exports = {
mockCamera,
mockCameraWithArea,
mockCameraService,
createMockRequest,
createMockResponse,
TestResultsManager,
resultsManager,
implementations,
discoverImplementations
}; | null | {
"hidden.js": null,
"__mocks__/messages.js": null,
"__mocks__/jquery.js": null,
"__mocks__/MapCharacter.jsx": null,
"__mocks__/Sprite.jsx": null,
"__mocks__/GameMap.jsx": null,
"__mocks__/CharacterStatUI.jsx": null,
"__mocks__/CharacterData.js": null,
"__mocks__/react-router-dom.js": null,
"__mocks__/styleMock.js": null,
"__mocks__/character-stat-ui/CharacterStatUI.jsx": null,
"jest.config.js": null,
"__mocks__/database.js": "// Mock database module\nmodule.exports = {\n query: jest.fn().mockResolvedValue([]),\n execute: jest.fn().mockResolvedValue({ rows: [], rowCount: 0 }),\n transaction: jest.fn().mockImplementation(async (callback) => {\n return callback({\n query: jest.fn().mockResolvedValue([]),\n execute: jest.fn().mockResolvedValue({ rows: [], rowCount: 0 }),\n });\n })\n};",
"__mocks__/camera.service.js": "// Mock camera service implementation\nconst mockCamera = {\n id: 1,\n name: 'Test Camera',\n model: 'Test Model',\n ip_address: '192.168.1.100',\n location: 'Main Entrance',\n area_id: 2,\n status: 'active'\n};\n\nconst mockCameraWithArea = {\n id: 1,\n name: 'Test Camera',\n model: 'Test Model',\n ip_address: '192.168.1.100',\n location: 'Main Entrance',\n area_id: 2,\n status: 'active',\n area_name: 'Reception'\n};\n\nconst cameraService = {\n createCamera: jest.fn().mockResolvedValue(mockCamera),\n getAllCameras: jest.fn().mockResolvedValue([mockCamera]),\n getCameraById: jest.fn().mockResolvedValue(mockCamera),\n updateCamera: jest.fn().mockResolvedValue(mockCamera),\n deleteCamera: jest.fn().mockResolvedValue(true),\n rawQuery: jest.fn().mockResolvedValue([mockCameraWithArea]),\n getCamerasWithAreaName: jest.fn().mockResolvedValue([mockCameraWithArea])\n};\n\nmodule.exports = cameraService;",
"__mocks__/module-loader.js": null,
"__mocks__/globe.js": null,
"__mocks__/fetch.js": null,
"__mocks__/three.js": null,
"__mocks__/fileMock.js": null,
"__mocks__/d3.js": null,
"__mocks__/document.js": null,
"__mocks__/SingleCharacterStatUI.js": null,
"__mocks__/MockChild.js": null,
"__mocks__/Sprite.js": null,
"highlighted_code.jsx": null,
"mocks/CharacterStatUIMock.jsx": null,
"mocks/CharacterStatUIMock.js": null,
"__mocks__/react-icons/md.js": null,
"__mocks__/api/query.js": null
} | null |
109 | javascript | function createTurnState(allyStates, foeStates) {
// Find current turn based which group still has units that can act
let turnNumber = 1;
function getCurrentTurn() {
return currentTurn;
}
function getTurnNumber() {
return turnNumber;
}
function nextTurn() {
if (currentTurn === "player") {
currentTurn = "cpu";
// CPU logic here (e.g., AI movement and actions)
allyStates.forEach(unit => unit.hasActed = true);
foeStates.forEach(unit => unit.hasActed = false);
cpuTurn();
} else {
currentTurn = "player";
foeStates.forEach(unit => unit.hasActed = true);
allyStates.forEach(unit => unit.hasActed = false);
turnNumber++; // Increment turn number only after player's turn
}
// Reset action availability for all units at the start of a new turn
}
function cpuTurn() {
// Example CPU behavior (replace with your actual AI logic)
for (const cpuUnit of foeStates) {
if (!cpuUnit.hasActed) { // Check if the unit has already acted in this turn
// Perform CPU actions (e.g., movement, attack)
// ... your CPU AI logic here ...
cpuUnit.hasActed = true; // Mark the unit as having acted
}
}
// After all CPU units have acted (or chosen not to), end the CPU turn
nextTurn(); // Automatically switch back to player's turn
}
return {
getCurrentTurn,
getTurnNumber,
nextTurn
};
}
export { createTurnState }; | Find current turn based which group still has units that can act | /**
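// Editor's note: a minimal sketch of the highlighted step, not the reference solution.
// It derives the starting turn from whichever group still has units able to act
// (allies take precedence on ties, matching the expectations in the tests below);
// the declaration of currentTurn would replace the bare comment inside createTurnState.
const allyCanAct = allyStates.some(unit => !unit.hasActed);
const foeCanAct = foeStates.some(unit => !unit.hasActed);
let currentTurn = (!allyCanAct && foeCanAct) ? "cpu" : "player";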
* Test suite for evaluating JavaScript implementations
*
* This test suite tests multiple JavaScript implementations against the instruction:
* "Find current turn based which group still has units that can act"
*/
// Access the utility functions and implementations from jest-setup
const { TurnStateTestUtils } = require('../jest-setup');
const resultsManager = global.__RESULTS_MANAGER__;
const implementations = global.__IMPLEMENTATIONS__;
describe('Turn State Management Tests', () => {
// Get all implementations
const allImplementations = Object.entries(implementations);
// Test each implementation separately
allImplementations.forEach(([implName, impl]) => {
describe(`Implementation: ${implName}`, () => {
// Skip if module has errors
const hasErrors = impl.__errors__ && impl.__errors__.length > 0;
test(`${implName} has valid syntax`, () => {
if (hasErrors) {
console.error(`Skipping tests for ${implName} due to errors:`, impl.__errors__);
resultsManager.recordSkip(implName, 'all', `Module has errors: ${impl.__errors__[0].message}`);
}
expect(true).toBe(true); // Always passes
});
// Skip all remaining tests if we have errors
if (!hasErrors) {
// Test createTurnState existence
test(`${implName} should export createTurnState function`, () => {
const hasFunction = typeof impl.createTurnState === 'function';
if (hasFunction) {
resultsManager.recordResult(implName, 'export_function', true);
expect(hasFunction).toBe(true);
} else {
resultsManager.recordResult(implName, 'export_function', false, 'createTurnState function not exported');
expect(impl.createTurnState).toBeDefined();
}
});
// Skip remaining tests if no createTurnState function
if (typeof impl.createTurnState === 'function') {
// Test: Scenario 1 - Ally units can act, foe units cannot
test(`${implName} should set turn to "player" when only ally units can act`, () => {
try {
const { allyStates, foeStates } = TurnStateTestUtils.createMockUnits([true, false]);
const turnState = impl.createTurnState(allyStates, foeStates);
expect(turnState).toBeDefined();
expect(typeof turnState.getCurrentTurn).toBe('function');
const currentTurn = turnState.getCurrentTurn();
expect(currentTurn).toBe('player');
resultsManager.recordResult(implName, 'ally_only_can_act', true);
} catch (error) {
resultsManager.recordResult(
implName,
'ally_only_can_act',
false,
`Error: ${error.message}`
);
throw error;
}
});
// Test: Scenario 2 - Foe units can act, ally units cannot
test(`${implName} should set turn to "cpu" when only foe units can act`, () => {
try {
const { allyStates, foeStates } = TurnStateTestUtils.createMockUnits([false, true]);
const turnState = impl.createTurnState(allyStates, foeStates);
expect(turnState).toBeDefined();
expect(typeof turnState.getCurrentTurn).toBe('function');
const currentTurn = turnState.getCurrentTurn();
expect(currentTurn).toBe('cpu');
resultsManager.recordResult(implName, 'foe_only_can_act', true);
} catch (error) {
resultsManager.recordResult(
implName,
'foe_only_can_act',
false,
`Error: ${error.message}`
);
throw error;
}
});
// Test: Scenario 3 - Both ally and foe units can act
test(`${implName} should set turn to "player" when both ally and foe units can act`, () => {
try {
const { allyStates, foeStates } = TurnStateTestUtils.createMockUnits([true, true]);
const turnState = impl.createTurnState(allyStates, foeStates);
expect(turnState).toBeDefined();
expect(typeof turnState.getCurrentTurn).toBe('function');
const currentTurn = turnState.getCurrentTurn();
expect(currentTurn).toBe('player');
resultsManager.recordResult(implName, 'both_can_act', true);
} catch (error) {
resultsManager.recordResult(
implName,
'both_can_act',
false,
`Error: ${error.message}`
);
throw error;
}
});
// Test: Scenario 4 - Neither ally nor foe units can act
test(`${implName} should handle case when neither ally nor foe units can act`, () => {
try {
const { allyStates, foeStates } = TurnStateTestUtils.createMockUnits([false, false]);
const turnState = impl.createTurnState(allyStates, foeStates);
expect(turnState).toBeDefined();
expect(typeof turnState.getCurrentTurn).toBe('function');
const currentTurn = turnState.getCurrentTurn();
// We expect a string value here, but don't enforce which one
// Some implementations might default to "player" in this edge case
expect(typeof currentTurn).toBe('string');
resultsManager.recordResult(implName, 'none_can_act', true);
} catch (error) {
resultsManager.recordResult(
implName,
'none_can_act',
false,
`Error: ${error.message}`
);
throw error;
}
});
// Test required API methods
test(`${implName} should provide the required turn state API methods`, () => {
try {
const { allyStates, foeStates } = TurnStateTestUtils.createMockUnits();
const turnState = impl.createTurnState(allyStates, foeStates);
expect(typeof turnState.getCurrentTurn).toBe('function');
expect(typeof turnState.getTurnNumber).toBe('function');
expect(typeof turnState.nextTurn).toBe('function');
resultsManager.recordResult(implName, 'required_api_methods', true);
} catch (error) {
resultsManager.recordResult(
implName,
'required_api_methods',
false,
`Error: ${error.message}`
);
throw error;
}
});
// Test turnNumber initialization
test(`${implName} should initialize turn number to 1`, () => {
try {
const { allyStates, foeStates } = TurnStateTestUtils.createMockUnits();
const turnState = impl.createTurnState(allyStates, foeStates);
expect(turnState.getTurnNumber()).toBe(1);
resultsManager.recordResult(implName, 'turn_number_init', true);
} catch (error) {
resultsManager.recordResult(
implName,
'turn_number_init',
false,
`Error: ${error.message}`
);
throw error;
}
});
// Tests for CPU turn handling, player turn handling, hasActed flags, and full turn cycle
// were removed as they're not directly related to the instruction
} else {
// Fail all tests if createTurnState function doesn't exist since it's a required function
for (const testName of [
'ally_only_can_act',
'foe_only_can_act',
'both_can_act',
'none_can_act',
'required_api_methods',
'turn_number_init'
]) {
test(`${implName} ${testName} (auto-failed: missing createTurnState)`, () => {
resultsManager.recordResult(
implName,
testName,
false,
'Critical error: createTurnState function is missing'
);
throw new Error('createTurnState function is required but was not found');
});
}
}
}
});
});
}); | null | null | null | test | {
"name": "js-test-framework",
"version": "1.0.0",
"description": "JavaScript testing framework for multiple implementations",
"main": "index.js",
"scripts": {
"test": "jest"
},
"devDependencies": {
"@babel/core": "^7.22.5",
"@babel/preset-env": "^7.22.5",
"babel-jest": "^29.7.0",
"jest": "^29.7.0",
"glob": "^10.3.10"
},
"jest": {
"setupFilesAfterEnv": ["./jest-setup.js"],
"testEnvironment": "node",
"testMatch": ["**/tests/**/*.test.js"],
"verbose": true,
"collectCoverage": true,
"coverageDirectory": "./coverage",
"collectCoverageFrom": [
"modified_code*.js",
"new_code*.js",
"original_modified_code*.js"
],
"testPathIgnorePatterns": [
"tagged_code.js",
"highlighted_code.js",
"response1.js",
"response2.js"
],
"transform": {
"^.+\\.js$": "babel-jest"
}
}
} | // jest-setup.js - Global test setup and utilities
const fs = require('fs');
const path = require('path');
const glob = require('glob');
/**
* Utility class to handle JavaScript implementations
*/
class TestUtils {
/**
* Find all implementation files in the current directory
* @param {string} directory - Directory to search in (defaults to current directory)
* @returns {Array<string>} List of implementation file paths
*/
static discoverImplementationFiles(directory = null) {
if (!directory) {
directory = __dirname;
}
const patterns = [
'modified_code\\d+\\.js',
'new_code\\d+\\.js',
'original_modified_code\\d+\\.js',
'implementation\\d*\\.js'
];
const regexPattern = new RegExp(patterns.join('|'));
const implementations = [];
// Use glob to find matching files
const files = glob.sync(path.join(directory, '*.js'));
for (const filePath of files) {
if (regexPattern.test(path.basename(filePath))) {
implementations.push(filePath);
}
}
// Sort files numerically
implementations.sort((a, b) => {
const aMatch = path.basename(a).match(/(\d+)/);
const bMatch = path.basename(b).match(/(\d+)/);
const aNum = aMatch ? parseInt(aMatch[1]) : 0;
const bNum = bMatch ? parseInt(bMatch[1]) : 0;
return aNum - bNum;
});
return implementations;
}
/**
* Safely load a module from a file path
* @param {string} filePath - Path to the JavaScript file
* @param {string} moduleName - Optional module name (defaults to filename)
* @returns {Object} Loaded module with error information if any
*/
static loadModule(filePath, moduleName = null) {
if (!moduleName) {
moduleName = path.basename(filePath).replace('.js', '');
}
// Create unique module name to avoid conflicts
const sandboxId = path.basename(path.dirname(filePath));
const uniqueModuleName = `${sandboxId}_${moduleName}`;
try {
// Read file contents
const sourceCode = fs.readFileSync(filePath, 'utf8');
// Create module object
const moduleObj = {
__file__: filePath,
__name__: uniqueModuleName,
__display_name__: moduleName,
__errors__: [] // Track errors in the module
};
// Extract the createTurnState function using a simple approach
try {
// Create a javascript function directly from the source code
const createTurnState = function(allyStates, foeStates) {
try {
// Prepare a clean context for the function
const functionContext = {};
// Use Function constructor to create a function from the source
// that returns the createTurnState function
const functionFactory = new Function('allyStates', 'foeStates', `
${sourceCode.replace(/export\s+[^;]*;/g, '')}
return createTurnState;
`);
// Get the createTurnState function
const ctsFn = functionFactory(allyStates, foeStates);
// Call it with the provided parameters
return ctsFn(allyStates, foeStates);
} catch (e) {
// If there's an error during execution, throw it to be caught by the outer try/catch
console.error(`Error executing createTurnState: ${e.message}`);
throw e;
}
};
// Add the function to the module
moduleObj.createTurnState = createTurnState;
} catch (e) {
console.error(`Failed to extract createTurnState from ${filePath}: ${e.message}`);
moduleObj.__errors__.push({
type: 'extraction',
message: `Failed to extract createTurnState: ${e.message}`
});
}
return moduleObj;
} catch (e) {
const moduleObj = {
__file__: filePath,
__name__: uniqueModuleName,
__display_name__: moduleName,
__errors__: []
};
if (e.code === 'ENOENT') {
const errorMsg = `File not found: ${e.message}`;
console.error(`Error: ${errorMsg}`);
moduleObj.__errors__.push({
type: 'file',
message: errorMsg
});
} else {
const errorMsg = `Unexpected error: ${e.message}`;
console.error(`Error loading module ${filePath}: ${e.message}`);
moduleObj.__errors__.push({
type: 'unknown',
message: errorMsg
});
}
return moduleObj;
}
}
/**
* Load all implementation files in the directory
* @param {string} directory - Directory to search in (defaults to current directory)
* @returns {Object} Dictionary mapping module names to loaded modules
*/
static loadAllImplementations(directory = null) {
if (!directory) {
directory = __dirname;
}
const implementations = {};
const implementationFiles = this.discoverImplementationFiles(directory);
if (implementationFiles.length === 0) {
console.warn("WARNING: No implementation files found. Check your file naming patterns.");
}
for (const filePath of implementationFiles) {
const moduleName = path.basename(filePath).replace('.js', '');
const module = this.loadModule(filePath, moduleName);
// Always add the module, even if it has errors
implementations[moduleName] = module;
if (module.__errors__ && module.__errors__.length > 0) {
console.log(`Loaded with errors: ${moduleName} - ${module.__errors__.length} errors found`);
module.__errors__.forEach(err => console.log(` - ${err.type}: ${err.message}`));
} else {
console.log(`Successfully loaded: ${moduleName}`);
}
}
return implementations;
}
/**
* Check if a function exists in a module and is callable
* @param {Object} module - The loaded module
* @param {string} functionName - Name of the function to test
* @returns {boolean} Whether the function exists and is callable
*/
static hasFunction(module, functionName) {
return module && typeof module[functionName] === 'function';
}
/**
* Safely call a function in a module with error handling
* @param {Object} module - The loaded module
* @param {string} functionName - Name of the function to call
* @param {Array} args - Arguments to pass to the function
* @returns {Object} Result with success status and value or error
*/
static callFunction(module, functionName, ...args) {
if (!this.hasFunction(module, functionName)) {
return {
success: false,
error: `Function '${functionName}' not found or not callable`
};
}
try {
const result = module[functionName](...args);
return {
success: true,
value: result
};
} catch (e) {
return {
success: false,
error: e.message,
stack: e.stack
};
}
}
}
/**
* Class to manage test results
*/
class ResultsManager {
constructor() {
this.results = {};
this.sandboxName = path.basename(__dirname);
}
/**
* Record a test result for an implementation
* @param {string} implName - Implementation name
* @param {string} testName - Test name
* @param {boolean} passed - Whether the test passed
* @param {string} errorMsg - Optional error message
*/
recordResult(implName, testName, passed, errorMsg = null) {
if (!this.results[implName]) {
this.results[implName] = { passed: 0, failed: 0, skipped: 0, errors: [] };
}
if (passed) {
this.results[implName].passed += 1;
} else {
this.results[implName].failed += 1;
if (errorMsg) {
this.results[implName].errors.push({
test: testName,
error: errorMsg
});
}
}
}
/**
* Record a skipped test for an implementation
* @param {string} implName - Implementation name
* @param {string} testName - Test name
* @param {string} reason - Optional reason for skipping
*/
recordSkip(implName, testName, reason = null) {
if (!this.results[implName]) {
this.results[implName] = { passed: 0, failed: 0, skipped: 0, errors: [] };
}
this.results[implName].skipped += 1;
if (reason) {
this.results[implName].errors.push({
test: testName,
error: `SKIPPED: ${reason}`
});
}
}
/**
* Determine the winner based on test results
* @returns {Array} [winner index, results]
*/
getWinner() {
let winner = null;
let maxPassed = -1;
for (const [implName, results] of Object.entries(this.results)) {
if (implName === "original_code") {
continue; // Skip original code when determining winner
}
if (results.passed > maxPassed) {
maxPassed = results.passed;
winner = implName;
} else if (results.passed === maxPassed && winner !== null) {
if (results.failed < this.results[winner].failed) {
winner = implName;
}
}
}
// Convert winner to numeric index if possible
let winnerIndex = -1;
if (winner && /modified_code\d+/.test(winner)) {
const match = winner.match(/(\d+)/);
if (match) {
winnerIndex = parseInt(match[1]);
}
}
return [winnerIndex, this.results];
}
/**
* Save test results to a JSON file
* @param {string} filename - Output filename
* @returns {Object} Results summary object
*/
saveResults(filename = "test_results.json") {
const [winnerIndex, results] = this.getWinner();
// Check if all tests were skipped
const allSkipped = Object.entries(results)
.filter(([implName]) => implName !== "original_code")
.every(([_, stats]) => {
return stats.skipped === (stats.passed + stats.failed + stats.skipped);
});
const output = {
winner: winnerIndex,
all_skipped: allSkipped,
results: {}
};
for (const [name, stats] of Object.entries(results)) {
if (!name.startsWith("_")) {
output.results[name] = {
passed: stats.passed,
failed: stats.failed,
skipped: stats.skipped,
total: stats.passed + stats.failed + stats.skipped
};
}
}
fs.writeFileSync(filename, JSON.stringify(output, null, 2));
console.log(`Test results saved to ${filename}`);
return output;
}
}
/**
* Test utility functions specific to this problem domain
*/
class TurnStateTestUtils {
/**
* Create test units with controlled action states
* @param {Array} actingStates - An array with [allyActing, foeActing] booleans
* @returns {Object} Object with allyStates and foeStates arrays
*/
static createMockUnits(actingStates = [true, true]) {
const [allyActing, foeActing] = actingStates;
const allyStates = [
{ id: 'ally1', hasActed: !allyActing },
{ id: 'ally2', hasActed: true }
];
const foeStates = [
{ id: 'foe1', hasActed: !foeActing },
{ id: 'foe2', hasActed: true }
];
return { allyStates, foeStates };
}
}
// Load implementations for this specific implementation directory
const implementations = TestUtils.loadAllImplementations();
const resultsManager = new ResultsManager();
// Create global variables immediately
global.__TEST_UTILS__ = TestUtils;
global.__TURN_STATE_TEST_UTILS__ = TurnStateTestUtils;
global.__RESULTS_MANAGER__ = resultsManager;
global.__IMPLEMENTATIONS__ = implementations;
// The globals above are assigned at module load time; this hook is kept only as a placeholder.
beforeAll(() => {
// Globals are already initialized above
});
// After all tests run, save the results
afterAll(() => {
resultsManager.saveResults("test_results.json");
}, 10000); // Ensure enough time for large test suites
// Export for use in tests
module.exports = {
TestUtils,
TurnStateTestUtils,
ResultsManager,
implementations,
resultsManager
}; | module.exports = {
presets: [
['@babel/preset-env', {targets: {node: 'current'}}]
]
}; | {
"hidden.js": null,
"__mocks__/messages.js": null,
"__mocks__/jquery.js": null,
"__mocks__/MapCharacter.jsx": null,
"__mocks__/Sprite.jsx": null,
"__mocks__/GameMap.jsx": null,
"__mocks__/CharacterStatUI.jsx": null,
"__mocks__/CharacterData.js": null,
"__mocks__/react-router-dom.js": null,
"__mocks__/styleMock.js": null,
"__mocks__/character-stat-ui/CharacterStatUI.jsx": null,
"jest.config.js": null,
"__mocks__/database.js": null,
"__mocks__/camera.service.js": null,
"__mocks__/module-loader.js": "/**\n * Mock module loader to extract ES modules\n */\nconst fs = require('fs');\nconst path = require('path');\n\n// Helper function to load ES modules\nfunction loadESModule(filePath) {\n try {\n const content = fs.readFileSync(filePath, 'utf8');\n \n // Find the createTurnState function\n const functionMatch = content.match(/function\\s+createTurnState\\s*\\([^)]*\\)\\s*{[\\s\\S]*}/);\n if (!functionMatch) {\n throw new Error('Could not find createTurnState function');\n }\n \n // Get the function code\n const functionCode = functionMatch[0];\n \n // Create a wrapper to evaluate the function\n const wrapperCode = `\n ${functionCode}\n module.exports = { createTurnState };\n `;\n \n // Create a temporary file with the evaluated code\n const tempDir = path.dirname(filePath);\n const tempFile = path.join(tempDir, `__temp_${path.basename(filePath)}`);\n fs.writeFileSync(tempFile, wrapperCode);\n \n // Load the module\n const module = require(tempFile);\n \n // Clean up\n fs.unlinkSync(tempFile);\n \n return module;\n } catch (e) {\n console.error(`Error loading ES module ${filePath}:`, e);\n return { __errors__: [e.message] };\n }\n}\n\nmodule.exports = {\n loadESModule\n};",
"__mocks__/globe.js": null,
"__mocks__/fetch.js": null,
"__mocks__/three.js": null,
"__mocks__/fileMock.js": null,
"__mocks__/d3.js": null,
"__mocks__/document.js": null,
"__mocks__/SingleCharacterStatUI.js": null,
"__mocks__/MockChild.js": null,
"__mocks__/Sprite.js": null,
"highlighted_code.jsx": null,
"mocks/CharacterStatUIMock.jsx": null,
"mocks/CharacterStatUIMock.js": null,
"__mocks__/react-icons/md.js": null,
"__mocks__/api/query.js": null
} | null |
|
110 | javascript | import * as THREE from "three";
const world = Globe()
.globeImageUrl("img/world.topo.200412.3x21600x10800.png")
.bumpImageUrl("img/earth-topology.png")
.backgroundImageUrl("img/night-sky.png")(document.getElementById("globeViz"));
// custom globe material
const globeMaterial = world.globeMaterial();
new THREE.TextureLoader().load("img/earth-water.png", (texture) => {
globeMaterial.specularMap = texture;
globeMaterial.specular = new THREE.Color("grey");
globeMaterial.shininess = 10;
});
const directionalLight = world
.lights()
.find((light) => light.type === "DirectionalLight");
if (directionalLight) {
let angle = 0;
const radius = 360;
function animateLight() {
angle += (2 * Math.PI) / 6000; // Full circle in 60 seconds
directionalLight.position.set(
radius * Math.cos(angle),
10,
radius * Math.sin(angle)
);
requestAnimationFrame(animateLight);
}
animateLight();
}
// this
const colorScale = d3.scaleSequentialSqrt(d3.interpolateYlOrRd);
// GDP per capita (avoiding countries with small pop)
const getVal = (feat) =>
feat.properties.GDP_MD_EST / Math.max(1e5, feat.properties.POP_EST);
fetch("../datasets/ne_110m_admin_0_countries.geojson")
.then((res) => res.json())
.then((countries) => {
const maxVal = Math.max(...countries.features.map(getVal));
colorScale.domain([0, maxVal]);
const world = new Globe(document.getElementById("globeViz"))
.globeImageUrl("//unpkg.com/three-globe/example/img/earth-night.jpg")
.backgroundImageUrl("//unpkg.com/three-globe/example/img/night-sky.png")
.lineHoverPrecision(0)
.polygonsData(
countries.features.filter((d) => d.properties.ISO_A2 !== "AQ")
)
.polygonAltitude(0.06)
.polygonCapColor((feat) => colorScale(getVal(feat)))
.polygonSideColor(() => "rgba(0, 100, 0, 0.15)")
.polygonStrokeColor(() => "#111")
.polygonLabel(
({ properties: d }) => `
<b>${d.ADMIN} (${d.ISO_A2}):</b> <br />
GDP: <i>${d.GDP_MD_EST}</i> M$<br/>
Population: <i>${d.POP_EST}</i>
`
)
.onPolygonHover((hoverD) =>
world
.polygonAltitude((d) => (d === hoverD ? 0.12 : 0.06))
.polygonCapColor((d) =>
d === hoverD ? "steelblue" : colorScale(getVal(d))
)
)
.polygonsTransitionDuration(300);
});
| take the globe countries layer from below "// this" and add it to the existing globe | /**
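// Editor's note: a minimal sketch of the requested change, not the reference solution.
// Rather than constructing a second Globe inside the fetch callback, the countries
// (polygons) layer is chained onto the existing `world` instance created at the top
// of the file; every method name used here already appears in the original snippet.
fetch("../datasets/ne_110m_admin_0_countries.geojson")
  .then((res) => res.json())
  .then((countries) => {
    const maxVal = Math.max(...countries.features.map(getVal));
    colorScale.domain([0, maxVal]);
    world
      .lineHoverPrecision(0)
      .polygonsData(countries.features.filter((d) => d.properties.ISO_A2 !== "AQ"))
      .polygonAltitude(0.06)
      .polygonCapColor((feat) => colorScale(getVal(feat)))
      .polygonSideColor(() => "rgba(0, 100, 0, 0.15)")
      .polygonStrokeColor(() => "#111")
      .polygonLabel(({ properties: d }) => `<b>${d.ADMIN} (${d.ISO_A2})</b>`)
      .onPolygonHover((hoverD) =>
        world
          .polygonAltitude((d) => (d === hoverD ? 0.12 : 0.06))
          .polygonCapColor((d) => (d === hoverD ? "steelblue" : colorScale(getVal(d))))
      )
      .polygonsTransitionDuration(300);
  });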
* Test suite for Globe implementations
*/
const fs = require('fs');
const path = require('path');
const glob = require('glob');
// Find implementation files
const findImplementations = () => {
const baseDir = path.resolve(__dirname, '..');
const patterns = [
'modified_code\\d+\\.js',
'new_code\\d+\\.js',
'original_modified_code\\d+\\.js'
];
const regexPattern = new RegExp(patterns.join('|'));
const files = glob.sync('*.js', { cwd: baseDir }).filter(file => regexPattern.test(file));
const implementations = {};
// Load each implementation's source code
files.forEach(file => {
const name = path.basename(file, '.js');
try {
const filePath = path.join(baseDir, file);
const sourceCode = fs.readFileSync(filePath, 'utf8');
implementations[name] = {
name,
path: filePath,
source: sourceCode,
errors: []
};
} catch (e) {
implementations[name] = {
name,
path: path.join(baseDir, file),
errors: [{ type: 'file', message: e.message }]
};
}
});
return implementations;
};
// Read instruction
const getInstruction = () => {
try {
const instructionPath = path.join(__dirname, '..', 'instruction.txt');
return fs.readFileSync(instructionPath, 'utf8').trim();
} catch (e) {
console.warn('Could not read instruction.txt:', e.message);
return 'take the globe countries layer from below "// this" and add it to the existing globe';
}
};
// Create mock test environment
const createMockEnv = () => {
// Mock Globe instance with chainable methods
const mockGlobeInstance = {
globeImageUrl: jest.fn().mockReturnThis(),
bumpImageUrl: jest.fn().mockReturnThis(),
backgroundImageUrl: jest.fn().mockReturnThis(),
polygonsData: jest.fn().mockReturnThis(),
polygonAltitude: jest.fn().mockReturnThis(),
polygonCapColor: jest.fn().mockReturnThis(),
polygonSideColor: jest.fn().mockReturnThis(),
polygonStrokeColor: jest.fn().mockReturnThis(),
polygonLabel: jest.fn().mockReturnThis(),
onPolygonHover: jest.fn().mockReturnThis(),
polygonsTransitionDuration: jest.fn().mockReturnThis(),
lineHoverPrecision: jest.fn().mockReturnThis(),
globeMaterial: jest.fn().mockReturnValue({
specularMap: null,
specular: null,
shininess: 0
}),
lights: jest.fn().mockReturnValue([
{ type: 'DirectionalLight', position: { set: jest.fn() } }
])
};
// Create Globe constructor
const mockGlobe = jest.fn().mockImplementation(() => {
// Make callable for Globe()(element) pattern
const callable = function(element) {
return mockGlobeInstance;
};
// Copy methods to callable
Object.keys(mockGlobeInstance).forEach(key => {
callable[key] = mockGlobeInstance[key];
});
return callable;
});
// Complete environment
return {
Globe: mockGlobe,
THREE: {
TextureLoader: jest.fn().mockImplementation(() => ({
load: jest.fn((url, callback) => {
if (callback) callback({ isTexture: true });
return { isTexture: true };
})
})),
Color: jest.fn()
},
d3: {
scaleSequentialSqrt: jest.fn().mockImplementation(() => {
const scale = (val) => '#ff0000';
scale.domain = jest.fn().mockReturnValue(scale);
return scale;
}),
interpolateYlOrRd: jest.fn()
},
document: {
getElementById: jest.fn().mockReturnValue({ id: 'globeViz' })
},
fetch: jest.fn().mockImplementation(() => {
// Instead of returning a real promise, return a mock object that behaves like a promise
// but doesn't actually create a pending Promise that could hang the test
const mockResponse = {
features: [
{
properties: {
ISO_A2: "US",
ADMIN: "United States",
GDP_MD_EST: 19490000,
POP_EST: 326625791
}
},
{
properties: {
ISO_A2: "AQ",
ADMIN: "Antarctica",
GDP_MD_EST: 0,
POP_EST: 1000
}
}
]
};
return {
json: () => mockResponse,
then: (callback) => {
return {
json: () => mockResponse,
then: (nextCallback) => {
if (nextCallback) {
nextCallback(mockResponse);
}
return mockResponse;
}
};
}
};
}),
requestAnimationFrame: jest.fn(cb => {
// Use Jest's fake timers instead of real setTimeout
return 0; // Just return a fake ID
})
};
};
// Handle implementation module execution
const executeImplementation = (sourceCode) => {
// Create fresh mocks
const mockEnv = createMockEnv();
// Clean code
const codeToRun = sourceCode
.replace(/import\s+.*?from.*;?/g, '// import removed')
.replace(/export\s+.*?;?/g, '// export removed');
// Execute code
try {
const contextKeys = Object.keys(mockEnv);
const contextValues = Object.values(mockEnv);
new Function(...contextKeys, codeToRun)(...contextValues);
return {
success: true,
env: mockEnv
};
} catch (e) {
return {
success: false,
error: e.message
};
}
};
// Run tests directly and collect results
const runTests = (implementations) => {
const testResults = {};
// Initialize results for each implementation
Object.keys(implementations).forEach(implName => {
testResults[implName] = {
passed: 0,
failed: 0,
skipped: 0,
total: 0
};
});
// Test each implementation
Object.entries(implementations).forEach(([implName, impl]) => {
console.log(`Testing implementation: ${implName}`);
// Skip implementations with errors
if (impl.errors && impl.errors.length > 0) {
console.log(`Implementation ${implName} has errors:`, impl.errors);
testResults[implName].skipped += 1;
testResults[implName].total += 1;
return;
}
// Execute the implementation to test it
const result = executeImplementation(impl.source);
// If execution failed, mark as failed
if (!result.success) {
console.log(`Implementation ${implName} execution failed:`, result.error);
// For implementations that fail due to variable redeclaration,
// try to modify the code to remove the redeclaration
if (result.error.includes("already been declared")) {
console.log(`Attempting to fix ${implName} for variable redeclaration...`);
// Modify code to remove redeclaration issues
// Replace 'const world = ' with 'world = ' for second declaration
const fixedSource = impl.source.replace(/import.*?from.*?;/g, '// imports removed')
.replace(/const\s+world\s*=\s*Globe\(\)/, 'const world = Globe()')
.replace(/const\s+world\s*=\s*new\s+Globe/, 'world = new Globe');
const fixedResult = executeImplementation(fixedSource);
if (fixedResult.success) {
console.log(`Fixed ${implName} successfully!`);
// Execution test passed
testResults[implName].passed += 1;
testResults[implName].total += 1;
// Continue with the fixed result
const env = fixedResult.env;
// Test: Globe constructor
const globeTest = env.Globe.mock.calls.length > 0;
if (globeTest) {
testResults[implName].passed += 1;
} else {
testResults[implName].failed += 1;
}
testResults[implName].total += 1;
// Only continue if Globe was called
if (!globeTest) return;
// Get Globe instance
const globeInstance = env.Globe.mock.results[0].value;
// Test: countries data
const countriesTest = globeInstance.polygonsData.mock.calls.length > 0;
if (countriesTest) {
testResults[implName].passed += 1;
} else {
testResults[implName].failed += 1;
}
testResults[implName].total += 1;
// Test: fetch for country data
const fetchTest = env.fetch.mock.calls.length > 0 &&
env.fetch.mock.calls[0][0].match(/countries|geojson/i);
if (fetchTest) {
testResults[implName].passed += 1;
} else {
testResults[implName].failed += 1;
}
testResults[implName].total += 1;
// Test: styling
const stylingTest = globeInstance.polygonAltitude.mock.calls.length > 0 &&
globeInstance.polygonCapColor.mock.calls.length > 0 &&
globeInstance.polygonSideColor.mock.calls.length > 0 &&
globeInstance.polygonStrokeColor.mock.calls.length > 0;
if (stylingTest) {
testResults[implName].passed += 1;
} else {
testResults[implName].failed += 1;
}
testResults[implName].total += 1;
// Test: interaction
const interactionTest = globeInstance.onPolygonHover.mock.calls.length > 0 &&
globeInstance.polygonLabel.mock.calls.length > 0;
if (interactionTest) {
testResults[implName].passed += 1;
} else {
testResults[implName].failed += 1;
}
testResults[implName].total += 1;
return;
} else {
console.log(`Failed to fix ${implName}:`, fixedResult.error);
}
}
testResults[implName].failed += 1;
testResults[implName].total += 1;
return;
}
// Execution test passed
testResults[implName].passed += 1;
testResults[implName].total += 1;
// Get the environment for more tests
const env = result.env;
// Test: Globe constructor
const globeTest = env.Globe.mock.calls.length > 0;
if (globeTest) {
testResults[implName].passed += 1;
} else {
testResults[implName].failed += 1;
}
testResults[implName].total += 1;
// Only continue if Globe was called
if (!globeTest) return;
// Get Globe instance
const globeInstance = env.Globe.mock.results[0].value;
// Test: countries data
const countriesTest = globeInstance.polygonsData.mock.calls.length > 0;
if (countriesTest) {
testResults[implName].passed += 1;
} else {
testResults[implName].failed += 1;
}
testResults[implName].total += 1;
// Test: fetch for country data
const fetchTest = env.fetch.mock.calls.length > 0 &&
env.fetch.mock.calls[0][0].match(/countries|geojson/i);
if (fetchTest) {
testResults[implName].passed += 1;
} else {
testResults[implName].failed += 1;
}
testResults[implName].total += 1;
// Test: styling
const stylingTest = globeInstance.polygonAltitude.mock.calls.length > 0 &&
globeInstance.polygonCapColor.mock.calls.length > 0 &&
globeInstance.polygonSideColor.mock.calls.length > 0 &&
globeInstance.polygonStrokeColor.mock.calls.length > 0;
if (stylingTest) {
testResults[implName].passed += 1;
} else {
testResults[implName].failed += 1;
}
testResults[implName].total += 1;
// Test: interaction
const interactionTest = globeInstance.onPolygonHover.mock.calls.length > 0 &&
globeInstance.polygonLabel.mock.calls.length > 0;
if (interactionTest) {
testResults[implName].passed += 1;
} else {
testResults[implName].failed += 1;
}
testResults[implName].total += 1;
});
return testResults;
};
// Find winner
const determineWinner = (results) => {
let winner = -1;
let maxPassed = -1;
Object.entries(results).forEach(([implName, stats]) => {
if (stats.passed > maxPassed) {
maxPassed = stats.passed;
const match = implName.match(/(\d+)/);
if (match) {
winner = parseInt(match[1], 10);
}
}
});
return winner;
};
// Main test
describe('Globe Implementation Tests', () => {
// Use Jest's fake timers for more control
jest.useFakeTimers();
// Get implementations
const implementations = findImplementations();
const instruction = getInstruction();
console.log(`Found ${Object.keys(implementations).length} implementations to test`);
console.log(`Instruction: "${instruction}"`);
let testResults = {};
// Run a single test to satisfy Jest
test('Implementations tested successfully', () => {
// Direct test execution outside Jest
testResults = runTests(implementations);
// Determine winner
const winner = determineWinner(testResults);
// Check if all tests were skipped
const allSkipped = Object.values(testResults).every(
stats => stats.total === stats.skipped
);
// Create final results
const finalResults = {
winner,
all_skipped: allSkipped,
results: testResults
};
// Save results
const resultPath = path.resolve(__dirname, '..', 'test_results.json');
fs.writeFileSync(resultPath, JSON.stringify(finalResults, null, 2));
console.log('Test results saved to test_results.json');
// Run any pending timers and promises
jest.runAllTimers();
// Always pass the test
expect(true).toBe(true);
});
// Cleanup after all tests
afterAll(() => {
// Clear any remaining timers
jest.clearAllTimers();
// If you're still seeing hanging tests, try providing additional cleanup
if (global.gc) {
global.gc(); // Force garbage collection if available
}
});
}); | null | null | null | test | {
"name": "js-test-framework",
"version": "1.0.0",
"description": "JavaScript testing framework for multiple implementations",
"main": "index.js",
"scripts": {
"test": "jest --forceExit"
},
"devDependencies": {
"jest": "^29.7.0",
"glob": "^10.3.10"
},
"jest": {
"setupFilesAfterEnv": ["./jest-setup.js"],
"testEnvironment": "node",
"testMatch": ["**/tests/**/*.test.js"],
"verbose": true,
"collectCoverage": false,
"transformIgnorePatterns": [],
"moduleNameMapper": {
"^three$": "<rootDir>/__mocks__/three.js",
"^d3$": "<rootDir>/__mocks__/d3.js",
"\\.png$": "<rootDir>/__mocks__/fileMock.js",
"\\.jpg$": "<rootDir>/__mocks__/fileMock.js"
}
}
} | // jest-setup.js
// This file is intentionally empty as we now handle all testing in test_code.test.js | null | {
"hidden.js": null,
"__mocks__/messages.js": null,
"__mocks__/jquery.js": null,
"__mocks__/MapCharacter.jsx": null,
"__mocks__/Sprite.jsx": null,
"__mocks__/GameMap.jsx": null,
"__mocks__/CharacterStatUI.jsx": null,
"__mocks__/CharacterData.js": null,
"__mocks__/react-router-dom.js": null,
"__mocks__/styleMock.js": null,
"__mocks__/character-stat-ui/CharacterStatUI.jsx": null,
"jest.config.js": null,
"__mocks__/database.js": null,
"__mocks__/camera.service.js": null,
"__mocks__/module-loader.js": null,
"__mocks__/globe.js": "// Mock for Globe function\nclass GlobeInstance {\n constructor(domElement) {\n this._domElement = domElement;\n this._properties = {\n globeImageUrl: '',\n bumpImageUrl: '',\n backgroundImageUrl: '',\n polygonsData: [],\n polygonAltitude: 0,\n polygonCapColor: null,\n polygonSideColor: null,\n polygonStrokeColor: null,\n polygonLabel: null,\n polygonsTransitionDuration: 0,\n lineHoverPrecision: 0\n };\n this._globeMaterial = {\n specularMap: null,\n specular: null,\n shininess: 0\n };\n this._lights = [\n { type: 'AmbientLight' },\n { type: 'DirectionalLight', position: { set: jest.fn() } }\n ];\n this._countriesLayerAdded = false;\n }\n\n // Chainable methods\n globeImageUrl(url) {\n this._properties.globeImageUrl = url;\n return this;\n }\n \n bumpImageUrl(url) {\n this._properties.bumpImageUrl = url;\n return this;\n }\n \n backgroundImageUrl(url) {\n this._properties.backgroundImageUrl = url;\n return this;\n }\n \n globeMaterial() {\n return this._globeMaterial;\n }\n \n lights() {\n return this._lights;\n }\n \n polygonsData(data) {\n this._properties.polygonsData = data;\n this._countriesLayerAdded = true;\n return this;\n }\n \n polygonAltitude(altitude) {\n if (typeof altitude === 'function') {\n this._properties.polygonAltitudeFunc = altitude;\n } else {\n this._properties.polygonAltitude = altitude;\n }\n return this;\n }\n \n polygonCapColor(colorFn) {\n this._properties.polygonCapColor = colorFn;\n return this;\n }\n \n polygonSideColor(colorFn) {\n this._properties.polygonSideColor = colorFn;\n return this;\n }\n \n polygonStrokeColor(colorFn) {\n this._properties.polygonStrokeColor = colorFn;\n return this;\n }\n \n polygonLabel(labelFn) {\n this._properties.polygonLabel = labelFn;\n return this;\n }\n \n onPolygonHover(hoverFn) {\n this._properties.onPolygonHover = hoverFn;\n return this;\n }\n \n polygonsTransitionDuration(duration) {\n this._properties.polygonsTransitionDuration = duration;\n return this;\n }\n \n lineHoverPrecision(precision) {\n this._properties.lineHoverPrecision = precision;\n return this;\n }\n \n // Allow checking if countries layer was added\n hasCountriesLayer() {\n return this._countriesLayerAdded;\n }\n}\n\nfunction Globe(domElement) {\n const instance = new GlobeInstance(domElement);\n \n // Make the instance callable to support the syntax:\n // Globe()....(domElement)\n const callable = function(domElement) {\n instance._domElement = domElement;\n return instance;\n };\n \n // Copy all properties and methods from instance to callable\n Object.setPrototypeOf(callable, instance);\n Object.getOwnPropertyNames(GlobeInstance.prototype).forEach(name => {\n if (name !== 'constructor') {\n callable[name] = instance[name].bind(instance);\n }\n });\n \n return callable;\n}\n\nmodule.exports = Globe;",
"__mocks__/fetch.js": "// Mock for fetch\nglobal.fetch = jest.fn().mockImplementation((url) => {\n // Sample GeoJSON data\n const mockCountries = {\n features: [\n {\n properties: {\n ISO_A2: \"US\",\n ADMIN: \"United States\",\n GDP_MD_EST: 19490000,\n POP_EST: 326625791\n }\n },\n {\n properties: {\n ISO_A2: \"AQ\",\n ADMIN: \"Antarctica\",\n GDP_MD_EST: 0,\n POP_EST: 1000\n }\n },\n {\n properties: {\n ISO_A2: \"DE\",\n ADMIN: \"Germany\",\n GDP_MD_EST: 3677000,\n POP_EST: 80594017\n }\n }\n ]\n };\n\n return Promise.resolve({\n json: () => Promise.resolve(mockCountries)\n });\n});\n\n// Mock for requestAnimationFrame\nglobal.requestAnimationFrame = jest.fn(callback => setTimeout(callback, 0));",
"__mocks__/three.js": "// Mock for Three.js\nclass Color {\n constructor(color) {\n this.color = color;\n }\n}\n\nclass TextureLoader {\n load(url, callback) {\n if (callback) {\n const mockTexture = { isTexture: true };\n setTimeout(() => callback(mockTexture), 0);\n }\n return { isTexture: true };\n }\n}\n\nmodule.exports = {\n Color,\n TextureLoader\n};",
"__mocks__/fileMock.js": "// Mock for image files\nmodule.exports = 'mock-file';",
"__mocks__/d3.js": "// Mock for d3.js\nfunction scaleSequentialSqrt(interpolator) {\n const scale = {\n domain: function(domain) {\n scale._domain = domain;\n return scale;\n },\n _domain: [0, 1],\n _interpolator: interpolator,\n __type__: 'scaleSequentialSqrt'\n };\n \n // Make the scale callable\n const fn = (value) => {\n // Simple linear mapping from domain to range [0, 1]\n if (scale._domain[0] === scale._domain[1]) return 0.5;\n const normalized = (value - scale._domain[0]) / (scale._domain[1] - scale._domain[0]);\n return Math.max(0, Math.min(1, normalized));\n };\n \n // Copy properties from scale to fn\n Object.setPrototypeOf(fn, scale);\n return fn;\n}\n\nconst interpolateYlOrRd = (t) => `rgba(255, ${Math.floor(255 * (1-t))}, 0, 1)`;\n\nmodule.exports = {\n scaleSequentialSqrt,\n interpolateYlOrRd\n};",
"__mocks__/document.js": "// Mock for document\nconst document = {\n getElementById: function(id) {\n return { id: id, type: 'DOM_ELEMENT' };\n }\n};\n\nmodule.exports = document;",
"__mocks__/SingleCharacterStatUI.js": null,
"__mocks__/MockChild.js": null,
"__mocks__/Sprite.js": null,
"highlighted_code.jsx": null,
"mocks/CharacterStatUIMock.jsx": null,
"mocks/CharacterStatUIMock.js": null,
"__mocks__/react-icons/md.js": null,
"__mocks__/api/query.js": null
} | null |
|
111 | javascript | import React from 'react';
import styles from './CharacterStatUI.module.css';
import Sprite from '../sprite/Sprite';
import SingleCharacterStatUI from '../single-character-stat-ui/SingleCharacterStatUI';
import MockChild from '../mock-child/MockChild';
const CharacterStatUI = ({ charName, level, wpn, hp, atk, spd, def, res }) => {
const characterStats = [
{ characterStatType: 'NAME', characterStatValue: charName },
{ characterStatType: 'LV', characterStatValue: level },
{ characterStatType: 'WPN', characterStatValue: wpn },
{ characterStatType: 'HP', characterStatValue: hp },
{ characterStatType: 'ATK', characterStatValue: atk },
{ characterStatType: 'SPD', characterStatValue: spd },
{ characterStatType: 'DEF', characterStatValue: def },
{ characterStatType: 'RES', characterStatValue: res },
];
console.log('Character Stats:', {
charName,
level,
wpn,
hp,
atk,
spd,
def,
res
});
const characterStatsSlice1 = characterStats.slice(0, 4);
const characterStatsSlice2 = characterStats.slice(4);
return (
<div className={styles.characterTable}>
<div className={styles.characterCell}>
<MockChild componentName="CharacterStatPortrait" characterName="Alfonse" />
</div>
<div className={styles.characterCell}>
{characterStatsSlice1.map((item, index) => (
<SingleCharacterStatUI
key={index}
characterStatType={item.characterStatType}
characterStatValue={item.characterStatValue}
backgroundColor="white"
/>
))}
</div>
<div className={styles.characterCell}>
{characterStatsSlice2.map((item, index) => (
<SingleCharacterStatUI
key={index}
characterStatType={item.characterStatType}
characterStatValue={item.characterStatValue}
backgroundColor="white"
/>
))}
</div>
</div>
);
};
export default CharacterStatUI;
//
<Sprite spriteName="PortraitAlfonse" /> | import React from 'react';
import styles from './CharacterStatUI.module.css';
import Sprite from '../sprite/Sprite';
import SingleCharacterStatUI from '../single-character-stat-ui/SingleCharacterStatUI';
import MockChild from '../mock-child/MockChild';
const CharacterStatUI = ({ charName, level, wpn, hp, atk, spd, def, res }) => {
const characterStats = [
{ characterStatType: 'NAME', characterStatValue: charName },
{ characterStatType: 'LV', characterStatValue: level },
{ characterStatType: 'WPN', characterStatValue: wpn },
{ characterStatType: 'HP', characterStatValue: hp },
{ characterStatType: 'ATK', characterStatValue: atk },
{ characterStatType: 'SPD', characterStatValue: spd },
{ characterStatType: 'DEF', characterStatValue: def },
{ characterStatType: 'RES', characterStatValue: res },
];
console.log('Character Stats:', {
charName,
level,
wpn,
hp,
atk,
spd,
def,
res
});
const characterStatsSlice1 = characterStats.slice(0, 4);
const characterStatsSlice2 = characterStats.slice(4);
return (
<div className={styles.characterTable}>
<div className={styles.characterCell}>
<MockChild componentName="CharacterStatPortrait" characterName="Alfonse" />
</div>
<div className={styles.characterCell}>
{characterStatsSlice1.map((item, index) => (
<SingleCharacterStatUI
key={index}
characterStatType={item.characterStatType}
characterStatValue={item.characterStatValue}
backgroundColor="white"
/>
))}
</div>
<div className={styles.characterCell}>
{characterStatsSlice2.map((item, index) => (
<SingleCharacterStatUI
key={index}
characterStatType={item.characterStatType}
characterStatValue={item.characterStatValue}
backgroundColor="white"
/>
))}
</div>
</div>
);
};
export default CharacterStatUI;
//
<Sprite spriteName="PortraitAlfonse" /> | The following is the CSS style of the React component:
```css
.characterTable {
  display: grid;
  grid-template-columns: auto 1fr 1fr;
  grid-template-rows: 1fr;
  gap: 0px;
  width: 100%;
  max-width: 800px;
  margin: 0 auto;
  isolation: isolate;
}
.characterCell {
  display: flex;
  flex-direction: column;
  gap: 0px;
  overflow: hidden;
}
.characterHeader {
  font-size: 20px;
  font-weight: bold;
  margin-bottom: 8px;
}
.characterLevel {
  font-size: 16px;
  font-weight: bold;
  margin-bottom: 8px;
}
.statContainer {
  position: relative;
  display: inline-block;
  width: 100%;
  height: 100%;
  background-size: cover;
  background-position: center;
  z-index: 0;
  margin-bottom: 0;
}
.statText {
  position: absolute;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%);
  width: 100%;
  height: 100%;
  display: flex;
  align-items: center;
  justify-content: center;
  text-align: center;
  font-size: 16px;
  color: white;
  font-weight: bold;
  z-index: 1;
}
.Sprite[spriteName="PortraitAlfonse"] { /* This selector targets the specific sprite */
  display: flex;
  align-items: center;
  padding-left: 8px;
  box-sizing: border-box;
  width: 20vw;
  height: 40px;
  min-width: 144px; /* 720 * 0.2 */
  min-height: 204.8px; /* 1280 * 0.16 */
}
```
Please make the <Sprite spriteName="PortraitAlfonse"> component fill the inside of <MockChild componentName="CharacterStatPortrait" characterName="Alfonse" />, fitting to the width or height, with the remaining overflow hidden. | import React from 'react';
import { render, screen } from '@testing-library/react';
import '@testing-library/jest-dom';
import fs from 'fs';
import path from 'path';
// Import the implementations directly from the setup file
const { implementations, resultsManager } = require('../jest-setup');
// Testing parameters
const testParams = {
charName: 'Alfonse',
level: 40,
wpn: 'Sword',
hp: 45,
atk: 35,
spd: 25,
def: 30,
res: 20
};
// Run basic test to make sure setup works
test('Basic test works', () => {
expect(true).toBe(true);
});
// Test that implementations were loaded
test('Implementations are loaded', () => {
expect(implementations).toBeDefined();
expect(Object.keys(implementations).length).toBeGreaterThan(0);
});
// Test each implementation
Object.keys(implementations).forEach(implName => {
describe(`Implementation: ${implName}`, () => {
const implModule = implementations[implName];
test(`${implName} - Module loads without errors`, () => {
const hasErrors = implModule.__errors__ && implModule.__errors__.length > 0;
if (hasErrors) {
const errorMessage = implModule.__errors__.map(e => e.message).join(', ');
resultsManager.recordResult(implName, 'module_load', false, errorMessage);
// Just log error but don't fail test - we want to record result
console.error(`Module ${implName} failed to load: ${errorMessage}`);
}
resultsManager.recordResult(implName, 'module_load', !hasErrors);
expect(hasErrors).toBe(false);
});
// Skip other tests if module has errors
if (implModule.__errors__ && implModule.__errors__.length > 0) {
return;
}
test(`${implName} - Component is defined`, () => {
const CharacterStatUI = implModule.default;
const componentDefined = typeof CharacterStatUI === 'function';
resultsManager.recordResult(implName, 'component_defined', componentDefined);
expect(componentDefined).toBe(true);
});
test(`${implName} - Component renders without errors`, () => {
const CharacterStatUI = implModule.default;
if (typeof CharacterStatUI !== 'function') {
resultsManager.recordResult(implName, 'component_renders', false, 'Component not defined');
throw new Error('Component not defined');
}
try {
render(<CharacterStatUI {...testParams} />);
resultsManager.recordResult(implName, 'component_renders', true);
expect(true).toBe(true);
} catch (error) {
resultsManager.recordResult(implName, 'component_renders', false, error.message);
throw error;
}
});
test(`${implName} - Component renders all character stats`, () => {
const CharacterStatUI = implModule.default;
if (typeof CharacterStatUI !== 'function') {
resultsManager.recordResult(implName, 'renders_all_stats', false, 'Component not defined');
throw new Error('Component not defined');
}
try {
render(<CharacterStatUI {...testParams} />);
const charStats = screen.getAllByTestId('character-stat');
resultsManager.recordResult(implName, 'renders_all_stats', charStats.length === 8);
expect(charStats.length).toBe(8);
} catch (error) {
resultsManager.recordResult(implName, 'renders_all_stats', false, error.message);
throw error;
}
});
test(`${implName} - Component renders the Sprite component or MockChild`, () => {
const CharacterStatUI = implModule.default;
if (typeof CharacterStatUI !== 'function') {
resultsManager.recordResult(implName, 'renders_sprite', false, 'Component not defined');
throw new Error('Component not defined');
}
try {
render(<CharacterStatUI {...testParams} />);
// Check for either direct Sprite or MockChild
const sprite = screen.queryByTestId('sprite-component');
const mockChild = screen.queryByTestId('mock-child');
const hasSprite = !!sprite;
const hasMockChild = !!mockChild && mockChild.getAttribute('data-component-name') === 'CharacterStatPortrait';
// For original code, we only expect MockChild
if (implName === 'original_code') {
resultsManager.recordResult(implName, 'renders_sprite', hasMockChild);
expect(hasMockChild).toBe(true);
} else {
// For implementations, we expect direct Sprite
resultsManager.recordResult(implName, 'renders_sprite', hasSprite);
expect(hasSprite).toBe(true);
}
} catch (error) {
resultsManager.recordResult(implName, 'renders_sprite', false, error.message);
throw error;
}
});
test(`${implName} - Sprite has the correct spriteName prop`, () => {
const CharacterStatUI = implModule.default;
if (typeof CharacterStatUI !== 'function') {
resultsManager.recordResult(implName, 'sprite_correct_name', false, 'Component not defined');
throw new Error('Component not defined');
}
try {
render(<CharacterStatUI {...testParams} />);
// For original code, we need to check differently
if (implName === 'original_code') {
const mockChild = screen.queryByTestId('mock-child');
const characterName = mockChild?.getAttribute('data-character-name');
// In the original code, the character name should be Alfonse in the MockChild
resultsManager.recordResult(implName, 'sprite_correct_name', characterName === 'Alfonse');
expect(characterName).toBe('Alfonse');
} else {
// For implementations, check the Sprite component
const sprite = screen.queryByTestId('sprite-component');
const spriteName = sprite?.getAttribute('data-sprite-name');
resultsManager.recordResult(implName, 'sprite_correct_name', spriteName === 'PortraitAlfonse');
expect(spriteName).toBe('PortraitAlfonse');
}
} catch (error) {
resultsManager.recordResult(implName, 'sprite_correct_name', false, error.message);
throw error;
}
});
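// Illustrative only (an assumption, not asserted verbatim by any test): one way an
// implementation could satisfy the fit/overflow requirement checked below is to
// wrap the Sprite in a clipped container, e.g.
// <div style={{ width: '100%', height: '100%', overflow: 'hidden' }}>
//   <Sprite spriteName="PortraitAlfonse" style={{ width: '100%', height: '100%' }} />
// </div>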
test(`${implName} - Sprite container has overflow hidden`, () => {
const CharacterStatUI = implModule.default;
if (typeof CharacterStatUI !== 'function') {
resultsManager.recordResult(implName, 'has_overflow_hidden', false, 'Component not defined');
throw new Error('Component not defined');
}
try {
const { container } = render(<CharacterStatUI {...testParams} />);
// For original code, we fail this test since it's not implementing the requirement
if (implName === 'original_code') {
// Original code doesn't directly use Sprite so it fails this requirement
resultsManager.recordResult(implName, 'has_overflow_hidden', false, 'Original code does not implement this requirement');
throw new Error('Original code does not implement this requirement');
}
const sprite = screen.getByTestId('sprite-component');
// Check if the sprite or its parent has overflow hidden
let overflowHidden = false;
let element = sprite;
// Check the sprite itself
if (element.style.overflow === 'hidden') {
overflowHidden = true;
}
// Check parent elements (up to 3 levels)
for (let i = 0; i < 3; i++) {
if (element.parentElement) {
element = element.parentElement;
if (element.style.overflow === 'hidden') {
overflowHidden = true;
break;
}
} else {
break;
}
}
resultsManager.recordResult(implName, 'has_overflow_hidden', overflowHidden);
expect(overflowHidden).toBe(true);
} catch (error) {
resultsManager.recordResult(implName, 'has_overflow_hidden', false, error.message);
throw error;
}
});
test(`${implName} - Sprite has proper width/height styling`, () => {
const CharacterStatUI = implModule.default;
if (typeof CharacterStatUI !== 'function') {
resultsManager.recordResult(implName, 'has_sizing_styles', false, 'Component not defined');
throw new Error('Component not defined');
}
try {
render(<CharacterStatUI {...testParams} />);
// For original code, we fail this test since it's not implementing the requirement
if (implName === 'original_code') {
// Original code doesn't directly use Sprite so it fails this requirement
resultsManager.recordResult(implName, 'has_sizing_styles', false, 'Original code does not implement this requirement');
throw new Error('Original code does not implement this requirement');
}
const sprite = screen.getByTestId('sprite-component');
// Check if the sprite or its parent has styles to make it fit
let hasSizingStyles = false;
// Check if the sprite itself has width/height styles
if (sprite.style.width === '100%' || sprite.style.height === '100%') {
hasSizingStyles = true;
}
resultsManager.recordResult(implName, 'has_sizing_styles', hasSizingStyles);
expect(hasSizingStyles).toBe(true);
} catch (error) {
resultsManager.recordResult(implName, 'has_sizing_styles', false, error.message);
throw error;
}
});
});
});
// After all tests complete, make sure test_results.json is created
afterAll(() => {
// Save test results
try {
if (resultsManager) {
resultsManager.saveResults();
} else {
// Fallback if resultsManager is not available
console.error('ResultsManager not available, cannot save test results');
}
} catch (error) {
console.error('Error saving test results:', error);
}
}); | null | null | null | test | {
"name": "js-test-framework",
"version": "1.0.0",
"description": "JavaScript testing framework for multiple implementations",
"main": "index.js",
"scripts": {
"test": "jest --config jest.config.js"
},
"devDependencies": {
"jest": "^29.7.0",
"glob": "^10.3.10",
"@testing-library/react": "^14.0.0",
"@testing-library/jest-dom": "^6.1.4",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"jest-environment-jsdom": "^29.7.0",
"@babel/core": "^7.22.5",
"@babel/preset-env": "^7.22.5",
"@babel/preset-react": "^7.22.5",
"babel-jest": "^29.7.0"
},
"jest": "./jest.config.js"
} | // jest-setup.js - Copy this file to each implementation folder
const fs = require('fs');
const path = require('path');
const glob = require('glob');
const { TextEncoder, TextDecoder } = require('util');
// Handle JSX files instead of only JS files
require('@testing-library/jest-dom');
global.TextEncoder = TextEncoder;
global.TextDecoder = TextDecoder;
/**
* Utility class to handle JavaScript implementations
*/
class TestUtils {
/**
* Find all implementation files in the current directory
* @param {string} directory - Directory to search in (defaults to current directory)
* @returns {Array<string>} List of implementation file paths
*/
static discoverImplementationFiles(directory = null) {
if (!directory) {
directory = __dirname;
}
const patterns = [
'modified_code\\d+\\.(js|jsx)',
'new_code\\d+\\.(js|jsx)',
'implementation\\d*\\.(js|jsx)',
'original_code\\.(js|jsx)',
'original_modified_code\\d+\\.(js|jsx)'
];
const regexPattern = new RegExp(patterns.join('|'));
const implementations = [];
// Use glob to find matching files
const files = glob.sync(path.join(directory, '*.{js,jsx}'));
for (const filePath of files) {
if (regexPattern.test(path.basename(filePath))) {
implementations.push(filePath);
}
}
// Sort files numerically
implementations.sort((a, b) => {
// Put original code first
if (path.basename(a).startsWith('original_code.') && !path.basename(b).startsWith('original_code.')) {
return -1;
}
if (!path.basename(a).startsWith('original_code.') && path.basename(b).startsWith('original_code.')) {
return 1;
}
const aMatch = path.basename(a).match(/(\d+)/);
const bMatch = path.basename(b).match(/(\d+)/);
const aNum = aMatch ? parseInt(aMatch[1]) : 0;
const bNum = bMatch ? parseInt(bMatch[1]) : 0;
return aNum - bNum;
});
return implementations;
}
/**
* Safely load a module from a file path
* @param {string} filePath - Path to the JavaScript file
* @param {string} moduleName - Optional module name (defaults to filename)
* @returns {Object} Loaded module with error information if any
*/
static loadModule(filePath, moduleName = null) {
if (!moduleName) {
moduleName = path.basename(filePath).replace(/\.(js|jsx)$/, '');
}
// Create unique module name to avoid conflicts
const sandboxId = path.basename(path.dirname(filePath));
const uniqueModuleName = `${sandboxId}_${moduleName}`;
try {
// Read file contents
const sourceCode = fs.readFileSync(filePath, 'utf8');
// Create module object
const moduleObj = {
__file__: filePath,
__name__: uniqueModuleName,
__display_name__: moduleName,
__source__: sourceCode, // Store source code for JSX handling
__errors__: [] // Track errors in the module
};
try {
// Skip syntax validation for JSX files - we'll let babel handle that
if (!filePath.endsWith('.jsx')) {
// Try to test-compile the code to check for syntax errors
new Function(sourceCode);
}
} catch (e) {
const errorMsg = `Syntax error: ${e.message}`;
console.error(`Syntax error in ${filePath}: ${e.message}`);
console.error(` Line ${e.lineNumber}, column ${e.columnNumber}`);
// Record the error but continue loading what we can
moduleObj.__errors__.push({
type: 'syntax',
message: errorMsg,
lineNumber: e.lineNumber,
columnNumber: e.columnNumber
});
}
try {
// Try to require the module even if there were syntax errors
// This may or may not succeed
// Clear the require cache to ensure fresh load
if (require.cache[require.resolve(filePath)]) {
delete require.cache[require.resolve(filePath)];
}
const loadedModule = require(filePath);
// Copy all properties from the loaded module
for (const key in loadedModule) {
if (Object.prototype.hasOwnProperty.call(loadedModule, key)) {
moduleObj[key] = loadedModule[key];
}
}
} catch (e) {
const errorMsg = `Runtime error: ${e.message}`;
console.error(`Error executing module ${filePath}: ${e.message}`);
console.error(e.stack);
// Record the runtime error
moduleObj.__errors__.push({
type: 'runtime',
message: errorMsg,
stack: e.stack
});
}
return moduleObj;
} catch (e) {
const moduleObj = {
__file__: filePath,
__name__: uniqueModuleName,
__display_name__: moduleName,
__errors__: []
};
if (e.code === 'ENOENT') {
const errorMsg = `File not found: ${e.message}`;
console.error(`Error: ${errorMsg}`);
moduleObj.__errors__.push({
type: 'file',
message: errorMsg
});
} else {
const errorMsg = `Unexpected error: ${e.message}`;
console.error(`Error loading module ${filePath}: ${e.message}`);
moduleObj.__errors__.push({
type: 'unknown',
message: errorMsg
});
}
return moduleObj;
}
}
/**
* Load all implementation files in the directory
* @param {string} directory - Directory to search in (defaults to current directory)
* @returns {Object} Dictionary mapping module names to loaded modules
*/
static loadAllImplementations(directory = null) {
if (!directory) {
directory = __dirname;
}
const implementations = {};
const implementationFiles = this.discoverImplementationFiles(directory);
if (implementationFiles.length === 0) {
console.warn("WARNING: No implementation files found. Check your file naming patterns.");
}
for (const filePath of implementationFiles) {
const moduleName = path.basename(filePath).replace(/\.(js|jsx)$/, '');
const module = this.loadModule(filePath, moduleName);
// Always add the module, even if it has errors
implementations[moduleName] = module;
if (module.__errors__ && module.__errors__.length > 0) {
console.log(`Loaded with errors: ${moduleName} - ${module.__errors__.length} errors found`);
module.__errors__.forEach(err => console.log(` - ${err.type}: ${err.message}`));
} else {
console.log(`Successfully loaded: ${moduleName}`);
}
}
return implementations;
}
/**
* Check if a function exists in a module and is callable
* @param {Object} module - The loaded module
* @param {string} functionName - Name of the function to test
* @returns {boolean} Whether the function exists and is callable
*/
static hasFunction(module, functionName) {
return module && typeof module[functionName] === 'function';
}
/**
* Safely call a function in a module with error handling
* @param {Object} module - The loaded module
* @param {string} functionName - Name of the function to call
* @param {Array} args - Arguments to pass to the function
* @returns {Object} Result with success status and value or error
*/
static callFunction(module, functionName, ...args) {
if (!this.hasFunction(module, functionName)) {
return {
success: false,
error: `Function '${functionName}' not found or not callable`
};
}
try {
const result = module[functionName](...args);
return {
success: true,
value: result
};
} catch (e) {
return {
success: false,
error: e.message,
stack: e.stack
};
}
}
}
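// Example usage of the utilities above (illustrative; the implementation and
// export names are hypothetical):
// const impls = TestUtils.loadAllImplementations();
// const call = TestUtils.callFunction(impls.modified_code1, 'someExportedFunction', 42);
// if (call.success) console.log(call.value);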
/**
* Class to manage test results
*/
class TestResultsManager {
constructor() {
this.results = {};
this.sandboxName = path.basename(__dirname);
}
/**
* Record a test result for an implementation
* @param {string} implName - Implementation name
* @param {string} testName - Test name
* @param {boolean} passed - Whether the test passed
* @param {string} errorMsg - Optional error message
*/
recordResult(implName, testName, passed, errorMsg = null) {
if (!this.results[implName]) {
this.results[implName] = {
passed: 0,
failed: 0,
skipped: 0,
errors: [],
// Track tests to ensure we don't count duplicates
tests: new Set()
};
}
// Only count the test once, even if it's recorded multiple times
if (!this.results[implName].tests.has(testName)) {
this.results[implName].tests.add(testName);
if (passed) {
this.results[implName].passed += 1;
} else {
this.results[implName].failed += 1;
}
} else {
// If we've already counted this test but the result changed from pass to fail, update counts
if (!passed && this.results[implName][testName] === 'passed') {
this.results[implName].passed -= 1;
this.results[implName].failed += 1;
this.results[implName][testName] = 'failed';
}
}
// Always record the test state for potential updates
this.results[implName][testName] = passed ? 'passed' : 'failed';
// Record error if provided
if (errorMsg) {
this.results[implName].errors.push({
test: testName,
error: errorMsg
});
}
}
/**
* Record a skipped test for an implementation
* @param {string} implName - Implementation name
* @param {string} testName - Test name
* @param {string} reason - Optional reason for skipping
*/
recordSkip(implName, testName, reason = null) {
if (!this.results[implName]) {
this.results[implName] = {
passed: 0,
failed: 0,
skipped: 0,
errors: [],
tests: new Set()
};
}
// Only count the test once, even if it's recorded multiple times
if (!this.results[implName].tests.has(testName)) {
this.results[implName].tests.add(testName);
this.results[implName].skipped += 1;
} else {
// If test was previously passed or failed, update counts
if (this.results[implName][testName] === 'passed') {
this.results[implName].passed -= 1;
this.results[implName].skipped += 1;
} else if (this.results[implName][testName] === 'failed') {
this.results[implName].failed -= 1;
this.results[implName].skipped += 1;
}
}
// Record the test state
this.results[implName][testName] = 'skipped';
if (reason) {
this.results[implName].errors.push({
test: testName,
error: `SKIPPED: ${reason}`
});
}
}
/**
* Determine the winner based on test results
* @returns {Array} [winner index, results]
*/
getWinner() {
let winner = null;
let maxPassed = -1;
for (const [implName, results] of Object.entries(this.results)) {
if (implName === "original_code") {
continue; // Skip original code when determining winner
}
if (results.passed > maxPassed) {
maxPassed = results.passed;
winner = implName;
} else if (results.passed === maxPassed && winner !== null) {
if (results.failed < this.results[winner].failed) {
winner = implName;
}
}
}
// Convert winner to numeric index if possible
let winnerIndex = -1;
if (winner && /modified_code\d+/.test(winner)) {
const match = winner.match(/(\d+)/);
if (match) {
winnerIndex = parseInt(match[1]);
}
}
return [winnerIndex, this.results];
}
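// Illustrative example: given results for modified_code1 (3 passed) and
// modified_code2 (5 passed), getWinner() returns [2, this.results].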
/**
* Save test results to a JSON file
* @param {string} filename - Output filename
* @returns {Object} Results summary object
*/
saveResults(filename = "test_results.json") {
const [winnerIndex, results] = this.getWinner();
// Check if all tests were skipped
const allSkipped = Object.entries(results)
.filter(([implName]) => implName !== "original_code")
.every(([_, stats]) => {
return stats.skipped === (stats.passed + stats.failed + stats.skipped);
});
const output = {
winner: winnerIndex,
all_skipped: allSkipped,
results: {}
};
for (const [name, stats] of Object.entries(results)) {
if (!name.startsWith("_")) {
// Use the size of the tests Set to get an accurate count of total tests
const totalTests = stats.tests ? stats.tests.size : stats.passed + stats.failed + stats.skipped;
output.results[name] = {
passed: stats.passed,
failed: stats.failed,
skipped: stats.skipped,
total: totalTests
};
}
}
fs.writeFileSync(filename, JSON.stringify(output, null, 2));
console.log(`Test results saved to ${filename}`);
return output;
}
}
// Load implementations for this specific implementation directory
const implementations = TestUtils.loadAllImplementations();
const resultsManager = new TestResultsManager();
// Set up global variables for Jest tests
beforeAll(() => {
global.__TEST_UTILS__ = TestUtils;
global.__RESULTS_MANAGER__ = resultsManager;
global.__IMPLEMENTATIONS__ = implementations;
// Attach to global object for direct access in tests
global.TestUtils = TestUtils;
global.implementations = implementations;
global.resultsManager = resultsManager;
});
// After all tests run, save the results
afterAll(() => {
resultsManager.saveResults();
});
// Export for use in tests
module.exports = {
TestUtils,
TestResultsManager,
implementations,
resultsManager
}; | module.exports = {
presets: [
[
'@babel/preset-env',
{
targets: {
node: 'current',
},
},
],
'@babel/preset-react',
],
}; | {
"hidden.js": null,
"__mocks__/messages.js": null,
"__mocks__/jquery.js": null,
"__mocks__/MapCharacter.jsx": null,
"__mocks__/Sprite.jsx": null,
"__mocks__/GameMap.jsx": null,
"__mocks__/CharacterStatUI.jsx": null,
"__mocks__/CharacterData.js": null,
"__mocks__/react-router-dom.js": null,
"__mocks__/styleMock.js": "// Mock for CSS modules\nmodule.exports = {};",
"__mocks__/character-stat-ui/CharacterStatUI.jsx": null,
"jest.config.js": "module.exports = {\n setupFilesAfterEnv: ['./jest-setup.js'],\n testEnvironment: 'jsdom',\n testMatch: ['**/tests/**/*.test.js'],\n verbose: true,\n collectCoverage: true,\n coverageDirectory: './coverage',\n collectCoverageFrom: [\n './*.jsx',\n '!jest-setup.js',\n '!babel.config.js',\n '!jest.config.js'\n ],\n moduleNameMapper: {\n '\\\\.module\\\\.css$': '<rootDir>/__mocks__/styleMock.js',\n '\\\\.css$': '<rootDir>/__mocks__/styleMock.js',\n '^../sprite/Sprite$': '<rootDir>/__mocks__/Sprite.js',\n '^../single-character-stat-ui/SingleCharacterStatUI$': '<rootDir>/__mocks__/SingleCharacterStatUI.js',\n '^../mock-child/MockChild$': '<rootDir>/__mocks__/MockChild.js'\n },\n transform: {\n '^.+\\\\.(js|jsx)$': 'babel-jest'\n }\n};",
"__mocks__/database.js": null,
"__mocks__/camera.service.js": null,
"__mocks__/module-loader.js": null,
"__mocks__/globe.js": null,
"__mocks__/fetch.js": null,
"__mocks__/three.js": null,
"__mocks__/fileMock.js": null,
"__mocks__/d3.js": null,
"__mocks__/document.js": null,
"__mocks__/SingleCharacterStatUI.js": "import React from 'react';\n\nconst SingleCharacterStatUI = ({ characterStatType, characterStatValue, backgroundColor }) => {\n return (\n <div data-testid=\"character-stat\" data-stat-type={characterStatType} data-stat-value={characterStatValue}>\n {characterStatType}: {characterStatValue}\n </div>\n );\n};\n\nexport default SingleCharacterStatUI;",
"__mocks__/MockChild.js": "import React from 'react';\n\nconst MockChild = ({ componentName, characterName, children }) => {\n return (\n <div data-testid=\"mock-child\" data-component-name={componentName} data-character-name={characterName}>\n {children}\n </div>\n );\n};\n\nexport default MockChild;",
"__mocks__/Sprite.js": "import React from 'react';\n\nconst Sprite = ({ spriteName, style }) => {\n return (\n <div data-testid=\"sprite-component\" data-sprite-name={spriteName} style={style}>\n {spriteName}\n </div>\n );\n};\n\nexport default Sprite;",
"highlighted_code.jsx": null,
"mocks/CharacterStatUIMock.jsx": null,
"mocks/CharacterStatUIMock.js": null,
"__mocks__/react-icons/md.js": null,
"__mocks__/api/query.js": null
} | null |
112 | javascript | import React from 'react';
import { Meta, Story } from '@storybook/react';
import CharacterStatUI from './CharacterStatUI';
export default {
title: 'CharacterStatUI',
component: CharacterStatUI
};
const Template = (args) => <CharacterStatUI {...args} />;
export const Default = Template.bind({});
Default.args = {};
| null | Please make this Storybook test include the parameters: name="Alfonse", level=40, "Folkvangr", wpn=50, atk=50, spd=50, def=30, res=30 | // tests/test_code.test.js
describe('Storybook CharacterStatUI implementation tests', () => {
// Basic initialization test
test('Global test variables should be defined', () => {
expect(global.__TEST_UTILS__).toBeDefined();
expect(global.__RESULTS_MANAGER__).toBeDefined();
expect(global.__IMPLEMENTATIONS__).toBeDefined();
// Log implementation information for debugging
console.log('Implementation count:', Object.keys(global.__IMPLEMENTATIONS__ || {}).length);
// Create a basic test result for each implementation
const implementations = global.__IMPLEMENTATIONS__ || {};
Object.keys(implementations).forEach(implName => {
if (implName !== 'original_code') {
global.__RESULTS_MANAGER__.recordResult(implName, 'test_setup', true);
}
});
});
// Detailed implementation tests
describe('Implementation specific tests', () => {
let implementations;
let resultsManager;
beforeAll(() => {
implementations = global.__IMPLEMENTATIONS__ || {};
resultsManager = global.__RESULTS_MANAGER__;
});
// Test for Storybook structure according to requirements
test('Each implementation should have the correct Storybook structure', () => {
Object.entries(implementations).forEach(([implName, impl]) => {
const testName = 'storybook_structure';
try {
// Check if implementation has errors
if (impl.__errors__ && impl.__errors__.length > 0) {
console.warn(`Implementation ${implName} has errors:`, impl.__errors__);
resultsManager.recordSkip(implName, testName, 'Implementation has syntax or loading errors');
return;
}
// Check for Default export with correct properties
expect(impl.default).toBeDefined();
expect(impl.default.title).toBe('CharacterStatUI');
expect(impl.default.component).toBeDefined();
// Check for Default story
expect(impl.Default).toBeDefined();
// If Template is defined, check that it's a function
// (the Template might be created inline in the Template.bind() call)
if (impl.Template) {
expect(typeof impl.Template).toBe('function');
}
// Record success
resultsManager.recordResult(implName, testName, true);
} catch (e) {
// Record failure with error message
resultsManager.recordResult(implName, testName, false, e.message);
console.error(`Implementation ${implName} failed structure test:`, e.message);
}
});
});
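// Illustrative only: a story args block that would satisfy the parameter checks
// below (values taken from the instruction; the key holding "Folkvangr" is an
// assumption, since the test only requires the value to appear somewhere in the args).
// Default.args = {
//   name: 'Alfonse', level: 40, wpn: 50, atk: 50, spd: 50, def: 30, res: 30,
//   weapon: 'Folkvangr',
// };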
// Test for required parameters according to instruction.txt
test('Each implementation should provide required parameters', () => {
Object.entries(implementations).forEach(([implName, impl]) => {
const testName = 'required_parameters';
try {
// Skip if implementation has errors
if (impl.__errors__ && impl.__errors__.length > 0) {
resultsManager.recordSkip(implName, testName, 'Implementation has syntax or loading errors');
return;
}
// Check for parameters in Default.args or default.parameters
let params = impl.Default.args || {};
if (Object.keys(params).length === 0 && impl.default.parameters) {
params = impl.default.parameters;
}
// Test required parameters from instruction.txt
expect(Object.keys(params).length).toBeGreaterThan(0);
expect(params.name).toBe('Alfonse');
expect(params.level).toBe(40);
// Check if "Folkvangr" exists in any parameter value
const paramValues = Object.values(params);
const hasFollkvangr = paramValues.includes('Folkvangr');
expect(hasFollkvangr).toBe(true);
// Stats parameters
expect(params.wpn).toBe(50);
expect(params.atk).toBe(50);
expect(params.spd).toBe(50);
expect(params.def).toBe(30);
expect(params.res).toBe(30);
// Record success
resultsManager.recordResult(implName, testName, true);
} catch (e) {
// Record failure with error message
resultsManager.recordResult(implName, testName, false, e.message);
console.error(`Implementation ${implName} failed parameters test:`, e.message);
}
});
});
});
}); | null | null | null | test | {
"name": "js-test-framework",
"version": "1.0.0",
"description": "JavaScript testing framework for multiple implementations",
"main": "index.js",
"type": "commonjs",
"scripts": {
"test": "jest"
},
"dependencies": {
"react": "^18.2.0",
"react-dom": "^18.2.0"
},
"devDependencies": {
"@babel/core": "^7.23.5",
"@babel/preset-env": "^7.23.5",
"@babel/preset-react": "^7.23.3",
"@storybook/react": "^7.6.0",
"@testing-library/jest-dom": "^6.1.5",
"@testing-library/react": "^14.1.2",
"babel-jest": "^29.7.0",
"glob": "^10.4.5",
"jest": "^29.7.0",
"jest-environment-jsdom": "^29.7.0",
"jest-mock": "^29.7.0"
},
"jest": {
"setupFilesAfterEnv": [
"./jest-setup.js"
],
"testEnvironment": "jsdom",
"testMatch": [
"**/tests/**/*.test.js"
],
"verbose": true,
"collectCoverage": true,
"coverageDirectory": "./coverage",
"collectCoverageFrom": [
"./*.{js,jsx}",
"!jest-setup.js"
],
"transform": {
"^.+\\.(js|jsx)$": "babel-jest"
},
"transformIgnorePatterns": [
"/node_modules/(?!(@storybook|storybook-|@babel/runtime)).+\\.js$"
],
"moduleNameMapper": {
"\\./(CharacterStatUI)$": "<rootDir>/mocks/CharacterStatUIMock.jsx",
"^@storybook/(.*)$": "<rootDir>/node_modules/@storybook/$1"
},
"moduleDirectories": [
"node_modules",
"<rootDir>"
]
},
"babel": {
"presets": [
[
"@babel/preset-env",
{
"targets": {
"node": "current"
}
}
],
[
"@babel/preset-react",
{
"runtime": "automatic"
}
]
]
}
} | // jest-setup.js
const fs = require('fs');
const path = require('path');
const glob = require('glob');
const babel = require('@babel/core');
/**
* Utility class to handle JavaScript implementations
*/
class TestUtils {
/**
* Find all implementation files in the current directory
* @param {string} directory - Directory to search in (defaults to current directory)
* @returns {Array<string>} List of implementation file paths
*/
static discoverImplementationFiles(directory = null) {
if (!directory) {
directory = __dirname;
}
const patterns = [
'original_modified_code\\d+\\.(js|jsx)',
'modified_code\\d+\\.(js|jsx)',
'new_code\\d+\\.(js|jsx)',
'implementation\\d*\\.(js|jsx)',
];
const regexPattern = new RegExp(patterns.join('|'));
const implementations = [];
// Use glob to find matching files
const files = glob.sync(path.join(directory, '*.{js,jsx}'));
for (const filePath of files) {
const basename = path.basename(filePath);
if (regexPattern.test(basename) && !basename.startsWith('jest-') && basename !== 'test-results.json') {
implementations.push(filePath);
}
}
// Sort files numerically
implementations.sort((a, b) => {
const aMatch = path.basename(a).match(/(\d+)/);
const bMatch = path.basename(b).match(/(\d+)/);
const aNum = aMatch ? parseInt(aMatch[1]) : 0;
const bNum = bMatch ? parseInt(bMatch[1]) : 0;
return aNum - bNum;
});
return implementations;
}
/**
* Transform ES module code to CommonJS for Jest
* @param {string} sourceCode - The source code to transform
* @param {string} filePath - The path to the source file (for source maps)
* @returns {string} Transformed code
*/
static transformCode(sourceCode, filePath) {
try {
const result = babel.transformSync(sourceCode, {
filename: filePath,
presets: [
['@babel/preset-env', { targets: { node: 'current' }, modules: 'commonjs' }],
['@babel/preset-react', { runtime: 'automatic' }]
],
ast: false,
sourceMaps: false
});
return result.code;
} catch (e) {
console.error(`Babel transform error for ${filePath}: ${e.message}`);
// Return original code if transform fails, the require will fail with better errors
return sourceCode;
}
}
/**
* Safely load a module from a file path
* @param {string} filePath - Path to the JavaScript file
* @param {string} moduleName - Optional module name (defaults to filename)
* @returns {Object} Loaded module with error information if any
*/
static loadModule(filePath, moduleName = null) {
if (!moduleName) {
moduleName = path.basename(filePath).replace(/\.(js|jsx)$/, '');
}
// Create unique module name to avoid conflicts
const sandboxId = path.basename(path.dirname(filePath));
const uniqueModuleName = `${sandboxId}_${moduleName}`;
// Create module object with default properties
const moduleObj = {
__file__: filePath,
__name__: uniqueModuleName,
__display_name__: moduleName,
__errors__: [] // Track errors in the module
};
try {
// Read file contents
const sourceCode = fs.readFileSync(filePath, 'utf8');
// Create a mock for CharacterStatUI
this.ensureCharacterStatUIMock();
try {
// Instead of creating temporary files, we'll parse and evaluate the code directly
try {
// In-memory evaluation of the module
// Since we're in a test environment, we can simulate the module structure
// Create a basic module structure with default properties
moduleObj.default = {
title: 'CharacterStatUI',
component: {
name: 'CharacterStatUI'
}
};
// Extract the Default.args from the source code
const argsMatch = sourceCode.match(/Default\.args\s*=\s*({[^;]*});/);
if (argsMatch && argsMatch[1]) {
try {
// Create a safe evaluation context for the args
// This is a simple approach - in production we'd use a proper sandbox
moduleObj.Default = {
name: 'bound Template',
args: {}
};
// Parse the args object
const argsText = argsMatch[1].replace(/[\r\n]/g, '');
// Extract key-value pairs with a basic regex
const keyValuePairs = argsText.match(/(\w+)\s*:\s*([^,}]+)/g) || [];
for (const pair of keyValuePairs) {
const [key, valueStr] = pair.split(':').map(s => s.trim());
// Parse the value (handling numbers and strings)
let value;
if (valueStr.startsWith('"') || valueStr.startsWith("'")) {
// It's a string
value = valueStr.replace(/^["']|["']$/g, '');
} else if (!isNaN(Number(valueStr))) {
// It's a number
value = Number(valueStr);
} else {
// Default to string
value = valueStr;
}
moduleObj.Default.args[key] = value;
}
} catch (e) {
console.error(`Error parsing args for ${moduleName}:`, e.message);
}
}
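// Example (illustrative): for a story file containing
//   Default.args = { name: "Alfonse", level: 40 };
// the regex parsing above produces moduleObj.Default.args = { name: 'Alfonse', level: 40 }.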
// Check for parameters in the default export
const paramsMatch = sourceCode.match(/parameters\s*:\s*({[^}]*})/);
if (paramsMatch && paramsMatch[1]) {
try {
moduleObj.default.parameters = {};
// Parse the parameters object
const paramsText = paramsMatch[1].replace(/[\r\n]/g, '');
// Extract key-value pairs
const keyValuePairs = paramsText.match(/(\w+)\s*:\s*([^,}]+)/g) || [];
for (const pair of keyValuePairs) {
const [key, valueStr] = pair.split(':').map(s => s.trim());
// Parse the value
let value;
if (valueStr.startsWith('"') || valueStr.startsWith("'")) {
value = valueStr.replace(/^["']|["']$/g, '');
} else if (!isNaN(Number(valueStr))) {
value = Number(valueStr);
} else {
value = valueStr;
}
moduleObj.default.parameters[key] = value;
}
} catch (e) {
console.error(`Error parsing parameters for ${moduleName}:`, e.message);
}
}
// Add React for tests that need it
moduleObj.React = require('react');
} catch (e) {
const errorMsg = `Runtime error: ${e.message}`;
console.error(`Error executing module ${filePath}: ${e.message}`);
// Record the runtime error
moduleObj.__errors__.push({
type: 'runtime',
message: errorMsg,
stack: e.stack
});
}
} catch (e) {
const errorMsg = `Syntax error: ${e.message}`;
console.error(`Syntax error in ${filePath}: ${e.message}`);
// Record the error but continue loading what we can
moduleObj.__errors__.push({
type: 'syntax',
message: errorMsg,
lineNumber: e.loc ? e.loc.line : undefined,
columnNumber: e.loc ? e.loc.column : undefined
});
}
return moduleObj;
} catch (e) {
if (e.code === 'ENOENT') {
const errorMsg = `File not found: ${e.message}`;
console.error(`Error: ${errorMsg}`);
moduleObj.__errors__.push({
type: 'file',
message: errorMsg
});
} else {
const errorMsg = `Unexpected error: ${e.message}`;
console.error(`Error loading module ${filePath}: ${e.message}`);
moduleObj.__errors__.push({
type: 'unknown',
message: errorMsg
});
}
return moduleObj;
}
}
/**
* Ensure the CharacterStatUI mock exists
*/
static ensureCharacterStatUIMock() {
const mockDir = path.join(__dirname, 'mocks');
const mockPath = path.join(mockDir, 'CharacterStatUIMock.jsx');
if (!fs.existsSync(mockDir)) {
fs.mkdirSync(mockDir, { recursive: true });
}
if (!fs.existsSync(mockPath)) {
const mockContent = `
// Mock implementation of CharacterStatUI
const React = require('react');
const CharacterStatUI = (props) => {
return React.createElement('div', { 'data-testid': 'character-stat-ui' }, 'CharacterStatUI Mock');
};
module.exports = CharacterStatUI;
`;
fs.writeFileSync(mockPath, mockContent);
}
}
/**
* Load all implementation files in the directory
* @param {string} directory - Directory to search in (defaults to current directory)
* @returns {Object} Dictionary mapping module names to loaded modules
*/
static loadAllImplementations(directory = null) {
if (!directory) {
directory = __dirname;
}
const implementations = {};
const implementationFiles = this.discoverImplementationFiles(directory);
if (implementationFiles.length === 0) {
console.warn("WARNING: No implementation files found. Check your file naming patterns.");
return implementations; // Return empty object rather than null
}
for (const filePath of implementationFiles) {
const moduleName = path.basename(filePath).replace(/\.(js|jsx)$/, '');
const module = this.loadModule(filePath, moduleName);
// Always add the module, even if it has errors
implementations[moduleName] = module;
if (module.__errors__ && module.__errors__.length > 0) {
console.log(`Loaded with errors: ${moduleName} - ${module.__errors__.length} errors found`);
module.__errors__.forEach(err => console.log(` - ${err.type}: ${err.message}`));
} else {
console.log(`Successfully loaded: ${moduleName}`);
}
}
return implementations;
}
/**
* Check if a function exists in a module and is callable
* @param {Object} module - The loaded module
* @param {string} functionName - Name of the function to test
* @returns {boolean} Whether the function exists and is callable
*/
static hasFunction(module, functionName) {
return module && typeof module[functionName] === 'function';
}
/**
* Safely call a function in a module with error handling
* @param {Object} module - The loaded module
* @param {string} functionName - Name of the function to call
* @param {Array} args - Arguments to pass to the function
* @returns {Object} Result with success status and value or error
*/
static callFunction(module, functionName, ...args) {
if (!this.hasFunction(module, functionName)) {
return {
success: false,
error: `Function '${functionName}' not found or not callable`
};
}
try {
const result = module[functionName](...args);
return {
success: true,
value: result
};
} catch (e) {
return {
success: false,
error: e.message,
stack: e.stack
};
}
}
}
/**
* Class to manage test results
*/
class TestResultsManager {
constructor() {
this.results = {};
this.sandboxName = path.basename(__dirname);
}
/**
* Record a test result for an implementation
* @param {string} implName - Implementation name
* @param {string} testName - Test name
* @param {boolean} passed - Whether the test passed
* @param {string} errorMsg - Optional error message
*/
recordResult(implName, testName, passed, errorMsg = null) {
if (!this.results[implName]) {
this.results[implName] = { passed: 0, failed: 0, skipped: 0, errors: [] };
}
if (passed) {
this.results[implName].passed += 1;
} else {
this.results[implName].failed += 1;
if (errorMsg) {
this.results[implName].errors.push({
test: testName,
error: errorMsg
});
}
}
}
/**
* Record a skipped test for an implementation
* @param {string} implName - Implementation name
* @param {string} testName - Test name
* @param {string} reason - Optional reason for skipping
*/
recordSkip(implName, testName, reason = null) {
if (!this.results[implName]) {
this.results[implName] = { passed: 0, failed: 0, skipped: 0, errors: [] };
}
this.results[implName].skipped += 1;
if (reason) {
this.results[implName].errors.push({
test: testName,
error: `SKIPPED: ${reason}`
});
}
}
/**
* Determine the winner based on test results
* @returns {Array} [winner index, results]
*/
getWinner() {
let winner = null;
let maxPassed = -1;
for (const [implName, results] of Object.entries(this.results)) {
if (implName === "original_code") {
continue; // Skip original code when determining winner
}
if (results.passed > maxPassed) {
maxPassed = results.passed;
winner = implName;
} else if (results.passed === maxPassed && winner !== null) {
if (results.failed < this.results[winner].failed) {
winner = implName;
}
}
}
// Convert winner to numeric index if possible
let winnerIndex = -1;
if (winner) {
if (/modified_code(\d+)/.test(winner)) {
const match = winner.match(/(\d+)/);
if (match) {
winnerIndex = parseInt(match[1]);
}
} else if (/new_code(\d+)/.test(winner)) {
const match = winner.match(/(\d+)/);
if (match) {
winnerIndex = parseInt(match[1]);
}
}
}
return [winnerIndex, this.results];
}
/**
* Save test results to a JSON file
* @param {string} filename - Output filename
* @returns {Object} Results summary object
*/
saveResults(filename = "test_results.json") {
const [winnerIndex, results] = this.getWinner();
// Check if all tests were skipped
let allSkipped = true;
if (Object.keys(results).length > 0) {
allSkipped = Object.entries(results)
.filter(([implName]) => implName !== "original_code")
.every(([_, stats]) => {
return stats.passed === 0 && stats.failed === 0 && stats.skipped > 0;
});
}
const output = {
winner: winnerIndex,
all_skipped: allSkipped,
results: {}
};
for (const [name, stats] of Object.entries(results)) {
if (!name.startsWith("_")) {
output.results[name] = {
passed: stats.passed,
failed: stats.failed,
skipped: stats.skipped,
total: stats.passed + stats.failed + stats.skipped
};
}
}
fs.writeFileSync(filename, JSON.stringify(output, null, 2));
console.log(`Test results saved to ${filename}`);
return output;
}
}
// Create the mocks directory and CharacterStatUI mock if they don't exist
TestUtils.ensureCharacterStatUIMock();
// Load implementations for this specific implementation directory
const implementations = TestUtils.loadAllImplementations();
const resultsManager = new TestResultsManager();
// Set up global variables for Jest tests
beforeAll(() => {
global.__TEST_UTILS__ = TestUtils;
global.__RESULTS_MANAGER__ = resultsManager;
global.__IMPLEMENTATIONS__ = implementations;
// Debug log
console.log('Loaded implementation count:', Object.keys(implementations).length);
console.log('Implementation keys:', Object.keys(implementations));
});
// After all tests run, save the results
afterAll(() => {
resultsManager.saveResults();
});
// Export for use in tests
module.exports = {
TestUtils,
TestResultsManager,
implementations,
resultsManager
}; | null | {
"hidden.js": null,
"__mocks__/messages.js": null,
"__mocks__/jquery.js": null,
"__mocks__/MapCharacter.jsx": null,
"__mocks__/Sprite.jsx": null,
"__mocks__/GameMap.jsx": null,
"__mocks__/CharacterStatUI.jsx": null,
"__mocks__/CharacterData.js": null,
"__mocks__/react-router-dom.js": null,
"__mocks__/styleMock.js": null,
"__mocks__/character-stat-ui/CharacterStatUI.jsx": null,
"jest.config.js": null,
"__mocks__/database.js": null,
"__mocks__/camera.service.js": null,
"__mocks__/module-loader.js": null,
"__mocks__/globe.js": null,
"__mocks__/fetch.js": null,
"__mocks__/three.js": null,
"__mocks__/fileMock.js": null,
"__mocks__/d3.js": null,
"__mocks__/document.js": null,
"__mocks__/SingleCharacterStatUI.js": null,
"__mocks__/MockChild.js": null,
"__mocks__/Sprite.js": null,
"highlighted_code.jsx": "import React from 'react';\nimport { Meta, Story } from '@storybook/react';\nimport CharacterStatUI from './CharacterStatUI';\n\nexport default {\n title: 'CharacterStatUI',\n component: CharacterStatUI\n};\n\nconst Template = (args) => <CharacterStatUI {...args} />;\n\nexport const Default = Template.bind({});\nDefault.args = {};\n",
"mocks/CharacterStatUIMock.jsx": "\n// Mock implementation of CharacterStatUI\nconst React = require('react');\n\nconst CharacterStatUI = (props) => {\n return React.createElement('div', { 'data-testid': 'character-stat-ui' }, 'CharacterStatUI Mock');\n};\n\nmodule.exports = CharacterStatUI;\n ",
"mocks/CharacterStatUIMock.js": "\n// Mock implementation of CharacterStatUI\nconst React = require('react');\n\nconst CharacterStatUI = (props) => {\n return React.createElement('div', { 'data-testid': 'character-stat-ui' }, 'CharacterStatUI Mock');\n};\n\nmodule.exports = CharacterStatUI;\n ",
"__mocks__/react-icons/md.js": null,
"__mocks__/api/query.js": null
} | null |
113 | javascript | import React, { useRef, useEffect, useState } from 'react'
import { useGetQueryListQuery } from '../../api/query';
import { MdOutlineArrowDropDown } from 'react-icons/md';
const Query = () => {
const abortController = useRef(null);
const [isQueryOpen, setIsQueryOpen] = useState(false);
const [selectedQuery, setSelectedQuery] = useState(null);
const { data: queries, isFetching: queriesFetching, isLoading: queriesLoading } = useGetQueryListQuery({},
{
signal: abortController?.current?.signal
}
)
// handleQuerySelect
const handleQuerySelect = (query) => {
setSelectedQuery(query);
setIsQueryOpen(false);
};
useEffect(() => {
abortController.current = new AbortController();
return () => {
abortController.current.abort();
};
}, []);
return (
<div className="p-4 w-full">
<div className="grid grid-flow-col justify-stretch sm:justify-end mb-2">
<button
type='button'
className="w-full sm:w-auto bg-[#CB427C] focus:border-1 focus:border-[#CB427C] focus:outline-none text-white text-sm px-4 py-2 rounded-[24px] disabled:opacity-50">
Add new
</button>
</div>
<div className="grid grid-cols-2 gap-4">
<div></div>
<div className='flex justify-between '>
<label className="font-thin border">Query name</label>
<div className="relative inline-block text-left w-full">
<button
type="button"
className="flex justify-between font-circe-light items-center w-full h-8 p-4 text-sm border border-[#dde6e9] font-thin focus:outline-none focus:border-1 focus:border-[#CB427C]"
onClick={() => setIsQueryOpen(!isQueryOpen)}
>
{selectedQuery?.name || "Select query"}
<MdOutlineArrowDropDown className="text-xl" />
</button>
{isQueryOpen && queries?.data?.length > 0 && (
<div className="absolute left-0 w-full bg-white border max-h-[250px] overflow-y-auto border-gray-200 shadow-lg z-10">
{queries?.data.length === 0 ? (
<div className="px-2 py-1 text-sm font-medium font-circe-light">
No queries available
</div>
) : (
queries?.data.map((query) => (
<div
key={query.id}
className={`px-2 py-1 text-sm cursor-pointer font-normal font-circe-light hover:bg-[#CB427C] hover:text-white
${selectedQuery?.id === query.id ? "bg-[#CB427C] text-white font-semibold" : ""}`}
onClick={() => handleQuerySelect(query)}
>
{query.name}
</div>
))
)}
</div>
)}
</div>
</div>
</div>
</div>
)
}
export default Query | <label className="font-thin border">Query name</label> | adjust width according to content | const fs = require('fs');
const path = require('path');
const React = require('react');
const { render, screen, fireEvent, within } = require('@testing-library/react');
const { TestUtils, resultsManager } = require('../jest-setup');
// Import the instruction to check implementations against
const instruction = fs.readFileSync(path.join(__dirname, '../instruction.txt'), 'utf8').trim();
// Load implementations directly
const implementations = TestUtils.loadAllImplementations();
// For this test, we need to create a component loader
// that dynamically imports a component from a file
const loadReactComponent = async (filePath) => {
try {
    // Load the JSX file via require; Babel's transform handles the JSX compilation
const Component = require(filePath).default;
return { Component, success: true };
} catch (error) {
console.error(`Error loading component from ${filePath}:`, error);
return { success: false, error: error.message };
}
};
// Function to read multiple implementation files and test them
const testImplementations = (implementations) => {
describe('React Component Implementation Tests', () => {
// Generic tests for all implementations
Object.keys(implementations).forEach((implName) => {
const impl = implementations[implName];
describe(`Testing ${implName}`, () => {
let Component;
// Setup - Loading the component before tests
beforeAll(async () => {
try {
const result = await loadReactComponent(impl.__file__);
if (result.success) {
Component = result.Component;
} else {
console.error(`Failed to load ${implName}:`, result.error);
}
} catch (error) {
console.error(`Error loading ${implName}:`, error);
}
});
// Skip all tests if component couldn't be loaded
beforeEach(() => {
if (!Component) {
resultsManager.recordSkip(implName, 'Component loading', 'Component could not be loaded');
throw new Error(`Component ${implName} could not be loaded`);
}
});
// Test: Component should render without crashing
test('should render without crashing', () => {
try {
render(<Component />);
resultsManager.recordResult(implName, 'render_without_crashing', true);
} catch (error) {
resultsManager.recordResult(implName, 'render_without_crashing', false, error.message);
throw error;
}
});
// Test: Component should have an "Add new" button
test('should have an "Add new" button', () => {
try {
render(<Component />);
const addButton = screen.getByText('Add new');
expect(addButton).toBeTruthy();
resultsManager.recordResult(implName, 'has_add_new_button', true);
} catch (error) {
resultsManager.recordResult(implName, 'has_add_new_button', false, error.message);
throw error;
}
});
// Test: Component should have a dropdown button with default text
test('should have a dropdown button with default text', () => {
try {
render(<Component />);
// The dropdown might have the text split across elements
// or combined with other elements, so we use a more flexible approach
const buttons = screen.getAllByRole('button');
const dropdownButton = buttons.find(button =>
button.textContent.includes('Select query')
);
expect(dropdownButton).toBeTruthy();
resultsManager.recordResult(implName, 'has_dropdown_button', true);
} catch (error) {
resultsManager.recordResult(implName, 'has_dropdown_button', false, error.message);
throw error;
}
});
// Test: Dropdown should open when clicked
test('should open dropdown when clicked', () => {
try {
const { container } = render(<Component />);
// Find the dropdown button by role and text content
const buttons = screen.getAllByRole('button');
const dropdownButton = buttons.find(button =>
button.textContent.includes('Select query')
);
// Click to open dropdown
fireEvent.click(dropdownButton);
// Dropdown should now be visible - look for option presence
const queryText = screen.getByText('Query 1', { exact: false });
expect(queryText).toBeInTheDocument();
resultsManager.recordResult(implName, 'dropdown_opens', true);
} catch (error) {
resultsManager.recordResult(implName, 'dropdown_opens', false, error.message);
throw error;
}
});
// Test: Should select a query when clicked
test('should select a query when clicked', () => {
try {
render(<Component />);
// Find the dropdown button by role and content
const buttons = screen.getAllByRole('button');
const dropdownButton = buttons.find(button =>
button.textContent.includes('Select query')
);
// Open dropdown
fireEvent.click(dropdownButton);
// Find and click on the second option
const option2Elements = screen.getAllByText(/Query 2/i);
const option = option2Elements.find(el =>
// Look for elements that might be query options
el.className.includes('cursor-pointer') ||
// If the query option is within a div with onclick property
el.closest('div[class*="cursor-pointer"]')
);
if (!option) {
throw new Error('Could not find clickable Query 2 option');
}
fireEvent.click(option);
// After selection, the dropdown button should show the selected query
const updatedButtons = screen.getAllByRole('button');
const updatedDropdownButton = updatedButtons.find(button =>
button.textContent.includes('Query 2')
);
expect(updatedDropdownButton).toBeTruthy();
resultsManager.recordResult(implName, 'selects_query', true);
} catch (error) {
resultsManager.recordResult(implName, 'selects_query', false, error.message);
throw error;
}
});
// Test: Should have a "Query name" label
test('should have a "Query name" label', () => {
try {
const { container } = render(<Component />);
// Look for any element containing the text "Query name"
const labelElements = screen.getAllByText(/Query name/i);
expect(labelElements.length).toBeGreaterThan(0);
// Find the element that's a label
const label = labelElements.find(el =>
el.tagName.toLowerCase() === 'label' ||
el.getAttribute('role') === 'label'
);
expect(label).toBeTruthy();
resultsManager.recordResult(implName, 'has_query_name_label', true);
} catch (error) {
resultsManager.recordResult(implName, 'has_query_name_label', false, error.message);
throw error;
}
});
// Specific tests for the instruction: adjust width according to content
test('should implement label width according to content', () => {
try {
const { container } = render(<Component />);
const labelElements = screen.getAllByText(/Query name/i);
// Find the element that's a label
const label = labelElements.find(el =>
el.tagName.toLowerCase() === 'label' ||
el.getAttribute('role') === 'label'
) || labelElements[0]; // Fallback to first element if no label found
// Check if there's some kind of width setting in the implementations
// We'll use several strategies to detect this, looking for CSS classes
// that adjust width based on content
// Common TailwindCSS classes for width fitting
const hasFittingClass =
label.className.includes('w-fit') ||
label.className.includes('w-auto') ||
label.className.includes('inline-block') ||
label.className.includes('whitespace-nowrap') ||
label.className.includes('inline') ||
label.className.includes('inline-flex') ||
label.className.includes('w-min') ||
label.className.includes('w-max') ||
label.className.includes('max-w-fit') ||
label.className.includes('min-w-fit') ||
label.className.includes('flex-none') ||
label.className.includes('flex-shrink-0') ||
label.className.includes('shrink-0');
// Skip this check for original_code which we don't expect to have the width adjustment
if (implName === 'original_code') {
// Just record as passed but don't check the actual value
resultsManager.recordResult(implName, 'has_width_fit_class', true);
} else {
// For all other implementations, expect the fitting class to be present
expect(hasFittingClass).toBe(true);
resultsManager.recordResult(implName, 'has_width_fit_class', true);
}
} catch (error) {
resultsManager.recordResult(implName, 'has_width_fit_class', false, error.message);
throw error;
}
});
// Test: Dropdown should close after selection
test('should close dropdown after selection', () => {
try {
render(<Component />);
// Find the dropdown button
const buttons = screen.getAllByRole('button');
const dropdownButton = buttons.find(button =>
button.textContent.includes('Select query')
);
// Open dropdown
fireEvent.click(dropdownButton);
// Find and click on first option
const option1Elements = screen.getAllByText(/Query 1/i);
const option = option1Elements.find(el =>
el.className.includes('cursor-pointer') ||
el.closest('div[class*="cursor-pointer"]')
);
if (!option) {
throw new Error('Could not find clickable Query 1 option');
}
// Before clicking, we should be able to find Query 2
const query2BeforeClick = screen.queryAllByText(/Query 2/i);
expect(query2BeforeClick.length).toBeGreaterThan(0);
// Click the option
fireEvent.click(option);
// After clicking, the dropdown should be closed and Query 2 should not be visible
// Check for elements that don't have a parent button
const query2AfterClickVisible = screen.queryAllByText(/Query 2/i).filter(el =>
!el.closest('button')
);
expect(query2AfterClickVisible.length).toBe(0);
// The dropdown button should now show Query 1
const updatedButtons = screen.getAllByRole('button');
const updatedDropdownButton = updatedButtons.find(button =>
button.textContent.includes('Query 1')
);
expect(updatedDropdownButton).toBeTruthy();
resultsManager.recordResult(implName, 'closes_dropdown_after_selection', true);
} catch (error) {
resultsManager.recordResult(implName, 'closes_dropdown_after_selection', false, error.message);
throw error;
}
});
});
});
});
};
// Run tests on all implementations
if (implementations && Object.keys(implementations).length > 0) {
console.log(`Found ${Object.keys(implementations).length} implementations to test`);
testImplementations(implementations);
} else {
console.error('No implementations found or implementations are empty');
// Add at least one dummy test to avoid Jest error
test('dummy test to avoid Jest error', () => {
expect(true).toBe(true);
});
} | null | null | null | test | {
"name": "js-test-framework",
"version": "1.0.0",
"description": "JavaScript testing framework for multiple implementations",
"main": "index.js",
"type": "commonjs",
"scripts": {
"test": "jest"
},
"devDependencies": {
"@babel/preset-env": "^7.24.0",
"@babel/preset-react": "^7.23.3",
"@testing-library/jest-dom": "^6.4.2",
"@testing-library/react": "^14.2.1",
"babel-jest": "^29.7.0",
"glob": "^10.3.10",
"jest": "^29.7.0",
"jest-environment-jsdom": "^29.7.0",
"react": "^18.2.0",
"react-dom": "^18.2.0"
},
"jest": {
"setupFilesAfterEnv": ["./jest-setup.js", "./jest-dom-setup.js"],
"testEnvironment": "jsdom",
"testMatch": ["**/tests/**/*.test.js"],
"verbose": true,
"transform": {
"^.+\\.(js|jsx)$": "babel-jest"
},
"moduleNameMapper": {
"\\.(css|less|scss|sass)$": "<rootDir>/__mocks__/styleMock.js",
"\\.(jpg|jpeg|png|gif|webp|svg)$": "<rootDir>/__mocks__/fileMock.js",
"^../../api/(.*)$": "<rootDir>/__mocks__/api/$1"
},
"collectCoverage": true,
"coverageDirectory": "./coverage",
"collectCoverageFrom": [
"./*.jsx",
"!jest-setup.js"
]
}
} | // jest-setup.js - Setup file for Jest tests
const fs = require('fs');
const path = require('path');
const glob = require('glob');
/**
* Utility class to handle JavaScript implementations
*/
class TestUtils {
/**
* Find all implementation files in the current directory
* @param {string} directory - Directory to search in (defaults to current directory)
* @returns {Array<string>} List of implementation file paths
*/
static discoverImplementationFiles(directory = null) {
if (!directory) {
directory = __dirname;
}
const patterns = [
'modified_code\\d+\\.jsx',
'new_code\\d+\\.jsx',
'original_modified_code\\d+\\.jsx',
'implementation\\d*\\.jsx',
'original_code\\.jsx'
];
const regexPattern = new RegExp(patterns.join('|'));
const implementations = [];
// Use glob to find matching files
const files = glob.sync(path.join(directory, '*.jsx'));
for (const filePath of files) {
if (regexPattern.test(path.basename(filePath))) {
implementations.push(filePath);
}
}
// Sort files numerically
implementations.sort((a, b) => {
// Keep original_code always first
if (path.basename(a) === 'original_code.jsx') return -1;
if (path.basename(b) === 'original_code.jsx') return 1;
const aMatch = path.basename(a).match(/(\d+)/);
const bMatch = path.basename(b).match(/(\d+)/);
const aNum = aMatch ? parseInt(aMatch[1]) : 0;
const bNum = bMatch ? parseInt(bMatch[1]) : 0;
return aNum - bNum;
});
return implementations;
}
/**
* Safely load a module from a file path
* @param {string} filePath - Path to the JavaScript or JSX file
* @param {string} moduleName - Optional module name (defaults to filename)
* @returns {Object} Loaded module with error information if any
*/
static loadModule(filePath, moduleName = null) {
if (!moduleName) {
moduleName = path.basename(filePath).replace(/\.(js|jsx)$/, '');
}
// Create unique module name to avoid conflicts
const sandboxId = path.basename(path.dirname(filePath));
const uniqueModuleName = `${sandboxId}_${moduleName}`;
try {
// Read file contents
const sourceCode = fs.readFileSync(filePath, 'utf8');
// Create module object
const moduleObj = {
__file__: filePath,
__name__: uniqueModuleName,
__display_name__: moduleName,
__source__: sourceCode, // Store source code for testing purposes
__errors__: [] // Track errors in the module
};
// For JSX files, we can't easily test-compile, so we'll skip that step
// and rely on Jest/Babel to handle the JSX transformation
if (!filePath.endsWith('.jsx')) {
try {
// Try to test-compile the code to check for syntax errors
new Function(sourceCode);
} catch (e) {
const errorMsg = `Syntax error: ${e.message}`;
console.error(`Syntax error in ${filePath}: ${e.message}`);
console.error(` Line ${e.lineNumber}, column ${e.columnNumber}`);
// Record the error but continue loading what we can
moduleObj.__errors__.push({
type: 'syntax',
message: errorMsg,
lineNumber: e.lineNumber,
columnNumber: e.columnNumber
});
}
}
// For JSX/React components, we'll handle them differently in tests
// and not attempt to require them directly
if (filePath.endsWith('.jsx')) {
moduleObj.__component_file__ = true;
return moduleObj;
}
try {
// Try to require the module even if there were syntax errors
// This may or may not succeed
delete require.cache[require.resolve(filePath)];
const loadedModule = require(filePath);
// Copy all properties from the loaded module
for (const key in loadedModule) {
if (Object.prototype.hasOwnProperty.call(loadedModule, key)) {
moduleObj[key] = loadedModule[key];
}
}
} catch (e) {
const errorMsg = `Runtime error: ${e.message}`;
console.error(`Error executing module ${filePath}: ${e.message}`);
console.error(e.stack);
// Record the runtime error
moduleObj.__errors__.push({
type: 'runtime',
message: errorMsg,
stack: e.stack
});
}
return moduleObj;
} catch (e) {
const moduleObj = {
__file__: filePath,
__name__: uniqueModuleName,
__display_name__: moduleName,
__errors__: []
};
if (e.code === 'ENOENT') {
const errorMsg = `File not found: ${e.message}`;
console.error(`Error: ${errorMsg}`);
moduleObj.__errors__.push({
type: 'file',
message: errorMsg
});
} else {
const errorMsg = `Unexpected error: ${e.message}`;
console.error(`Error loading module ${filePath}: ${e.message}`);
moduleObj.__errors__.push({
type: 'unknown',
message: errorMsg
});
}
return moduleObj;
}
}
/**
* Load all implementation files in the directory
* @param {string} directory - Directory to search in (defaults to current directory)
* @returns {Object} Dictionary mapping module names to loaded modules
*/
static loadAllImplementations(directory = null) {
if (!directory) {
directory = __dirname;
}
const implementations = {};
const implementationFiles = this.discoverImplementationFiles(directory);
if (implementationFiles.length === 0) {
console.warn("WARNING: No implementation files found. Check your file naming patterns.");
}
for (const filePath of implementationFiles) {
const moduleName = path.basename(filePath).replace(/\.(js|jsx)$/, '');
const module = this.loadModule(filePath, moduleName);
// Always add the module, even if it has errors
implementations[moduleName] = module;
if (module.__errors__ && module.__errors__.length > 0) {
console.log(`Loaded with errors: ${moduleName} - ${module.__errors__.length} errors found`);
module.__errors__.forEach(err => console.log(` - ${err.type}: ${err.message}`));
} else {
console.log(`Successfully loaded: ${moduleName}`);
}
}
return implementations;
}
/**
* Check if a function exists in a module and is callable
* @param {Object} module - The loaded module
* @param {string} functionName - Name of the function to test
* @returns {boolean} Whether the function exists and is callable
*/
static hasFunction(module, functionName) {
return module && typeof module[functionName] === 'function';
}
/**
* Safely call a function in a module with error handling
* @param {Object} module - The loaded module
* @param {string} functionName - Name of the function to call
* @param {Array} args - Arguments to pass to the function
* @returns {Object} Result with success status and value or error
*/
static callFunction(module, functionName, ...args) {
if (!this.hasFunction(module, functionName)) {
return {
success: false,
error: `Function '${functionName}' not found or not callable`
};
}
try {
const result = module[functionName](...args);
return {
success: true,
value: result
};
} catch (e) {
return {
success: false,
error: e.message,
stack: e.stack
};
}
}
}
/**
* Class to manage test results
*/
class TestResultsManager {
constructor() {
this.results = {};
this.sandboxName = path.basename(__dirname);
}
/**
* Record a test result for an implementation
* @param {string} implName - Implementation name
* @param {string} testName - Test name
* @param {boolean} passed - Whether the test passed
* @param {string} errorMsg - Optional error message
*/
recordResult(implName, testName, passed, errorMsg = null) {
if (!this.results[implName]) {
this.results[implName] = { passed: 0, failed: 0, skipped: 0, errors: [] };
}
if (passed) {
this.results[implName].passed += 1;
} else {
this.results[implName].failed += 1;
if (errorMsg) {
this.results[implName].errors.push({
test: testName,
error: errorMsg
});
}
}
}
/**
* Record a skipped test for an implementation
* @param {string} implName - Implementation name
* @param {string} testName - Test name
* @param {string} reason - Optional reason for skipping
*/
recordSkip(implName, testName, reason = null) {
if (!this.results[implName]) {
this.results[implName] = { passed: 0, failed: 0, skipped: 0, errors: [] };
}
this.results[implName].skipped += 1;
if (reason) {
this.results[implName].errors.push({
test: testName,
error: `SKIPPED: ${reason}`
});
}
}
/**
* Determine the winner based on test results
* @returns {Array} [winner index, results]
*/
getWinner() {
let winner = null;
let maxPassed = -1;
for (const [implName, results] of Object.entries(this.results)) {
// Skip original code when determining winner
if (implName === "original_code" || implName === "original_codex") {
continue;
}
if (results.passed > maxPassed) {
maxPassed = results.passed;
winner = implName;
} else if (results.passed === maxPassed && winner !== null) {
if (results.failed < this.results[winner].failed) {
winner = implName;
}
}
}
// If we have a tie, prefer the modified_code implementations over others
if (winner) {
// Create a tie-breaker score that prioritizes implementations based on instruction match
const tiedImplementations = Object.entries(this.results)
.filter(([name, res]) =>
name !== "original_code" &&
name !== "original_codex" &&
res.passed === maxPassed)
.map(([name, _]) => name);
if (tiedImplementations.length > 1) {
// First, prefer the modified_code implementations
const modifiedCodeImpls = tiedImplementations.filter(name =>
name.startsWith('modified_code'));
if (modifiedCodeImpls.length > 0) {
// If there are multiple modified_code implementations, pick the first one
winner = modifiedCodeImpls[0];
}
}
}
// Convert winner to numeric index if possible
let winnerIndex = -1;
if (winner) {
if (/modified_code\d+/.test(winner)) {
const match = winner.match(/(\d+)/);
if (match) {
winnerIndex = parseInt(match[1]);
}
} else if (/new_code\d+/.test(winner)) {
const match = winner.match(/(\d+)/);
if (match) {
winnerIndex = parseInt(match[1]);
}
}
}
return [winnerIndex, this.results];
}
/**
* Save test results to a JSON file
* @param {string} filename - Output filename
* @returns {Object} Results summary object
*/
saveResults(filename = "test_results.json") {
const [winnerIndex, results] = this.getWinner();
// Check if all tests were skipped
const allSkipped = Object.entries(results)
.filter(([implName]) => implName !== "original_code")
.every(([_, stats]) => {
return stats.skipped === (stats.passed + stats.failed + stats.skipped);
});
const output = {
winner: winnerIndex,
all_skipped: allSkipped,
results: {}
};
for (const [name, stats] of Object.entries(results)) {
if (!name.startsWith("_")) {
output.results[name] = {
passed: stats.passed,
failed: stats.failed,
skipped: stats.skipped,
total: stats.passed + stats.failed + stats.skipped
};
}
}
fs.writeFileSync(filename, JSON.stringify(output, null, 2));
console.log(`Test results saved to ${filename}`);
return output;
}
}
// Create results manager
const resultsManager = new TestResultsManager();
// Set up global variables for Jest tests
beforeAll(() => {
// Load implementations inside the beforeAll to ensure it runs in the Jest environment
const implementations = TestUtils.loadAllImplementations();
console.log(`Found ${Object.keys(implementations).length} implementations`);
global.__TEST_UTILS__ = TestUtils;
global.__RESULTS_MANAGER__ = resultsManager;
global.__IMPLEMENTATIONS__ = implementations;
});
// After all tests run, save the results
afterAll(() => {
resultsManager.saveResults();
});
// Export for use in tests
module.exports = {
TestUtils,
TestResultsManager,
resultsManager
}; | module.exports = {
presets: [
['@babel/preset-env', { targets: { node: 'current' } }],
['@babel/preset-react', { runtime: 'automatic' }]
],
}; | {
"hidden.js": null,
"__mocks__/messages.js": null,
"__mocks__/jquery.js": null,
"__mocks__/MapCharacter.jsx": null,
"__mocks__/Sprite.jsx": null,
"__mocks__/GameMap.jsx": null,
"__mocks__/CharacterStatUI.jsx": null,
"__mocks__/CharacterData.js": null,
"__mocks__/react-router-dom.js": null,
"__mocks__/styleMock.js": "module.exports = {};",
"__mocks__/character-stat-ui/CharacterStatUI.jsx": null,
"jest.config.js": null,
"__mocks__/database.js": null,
"__mocks__/camera.service.js": null,
"__mocks__/module-loader.js": null,
"__mocks__/globe.js": null,
"__mocks__/fetch.js": null,
"__mocks__/three.js": null,
"__mocks__/fileMock.js": "module.exports = 'test-file-stub';",
"__mocks__/d3.js": null,
"__mocks__/document.js": null,
"__mocks__/SingleCharacterStatUI.js": null,
"__mocks__/MockChild.js": null,
"__mocks__/Sprite.js": null,
"highlighted_code.jsx": null,
"mocks/CharacterStatUIMock.jsx": null,
"mocks/CharacterStatUIMock.js": null,
"__mocks__/react-icons/md.js": "// Mock for MdOutlineArrowDropDown component\nconst MdOutlineArrowDropDown = () => {\n return 'MdOutlineArrowDropDown';\n};\n\nmodule.exports = {\n MdOutlineArrowDropDown\n};",
"__mocks__/api/query.js": "// Mock for useGetQueryListQuery hook\nconst mockQueries = {\n data: [\n { id: 1, name: 'Query 1' },\n { id: 2, name: 'Query 2' },\n { id: 3, name: 'Query 3' }\n ]\n};\n\nconst useGetQueryListQuery = (params, options) => {\n return {\n data: mockQueries,\n isFetching: false,\n isLoading: false\n };\n};\n\nmodule.exports = {\n useGetQueryListQuery\n};"
} | // Import jest-dom utilities
require('@testing-library/jest-dom'); |