#!/usr/bin/env python3
"""
Test script to verify length instructions are working
"""
import sys
import os
# Add current directory to Python path
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, script_dir)
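# (so the `src` package sitting next to this script stays importable no matter
#  which working directory the test is launched from)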
def test_length_instructions():
    """Test that length instructions are properly formatted"""
    print("🧪 Testing Length Instructions...")

    try:
        from src.autocomplete import SmartAutoComplete

        # Create mock settings
        class MockSettings:
            def __init__(self):
                self.OPENAI_API_KEY = "test-key"
                self.ANTHROPIC_API_KEY = ""
                self.DEFAULT_PROVIDER = "openai"
                self.CACHE_TTL = 3600
                self.CACHE_MAX_SIZE = 100

        # Create mock API client that captures the messages
        class MockAPIClient:
            def __init__(self, settings=None):
                self.last_messages = None

            def get_completion(self, messages, temperature=0.7, max_tokens=150, provider=None):
                self.last_messages = messages
                print(f"\n📡 API called with max_tokens: {max_tokens}")
                print(f"📋 System prompt: {messages[0]['content'][:200]}...")
                print(f"📋 User message: {messages[1]['content'][:200]}...")
                return f"Mock completion response ({max_tokens} tokens requested)"

        # Create mock cache
        class MockCacheManager:
            def __init__(self, settings=None):
                pass

            def get(self, key):
                return None

            def set(self, key, value):
                pass

        # Test setup
        settings = MockSettings()
        autocomplete = SmartAutoComplete(settings)
        autocomplete.api_client = MockAPIClient(settings)
        autocomplete.cache_manager = MockCacheManager(settings)

        # Test different token lengths
        test_cases = [
            (50, "short"),
            (150, "medium"),
            (300, "long"),
            (500, "very long"),
        ]
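        # Each case drives the real suggestion path against the mocks, then
        # inspects the prompts that were handed to the (mock) API client.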
        for max_tokens, description in test_cases:
            print(f"\n📏 Testing {description} output ({max_tokens} tokens):")

            suggestions = autocomplete.get_suggestions(
                text="Dear Mr. Johnson,",
                context="email",
                max_tokens=max_tokens,
                user_context="Meeting about quarterly budget"
            )

            # Check if the messages contain the token count
            messages = autocomplete.api_client.last_messages
            system_prompt = messages[0]['content']
            user_message = messages[1]['content']

            # Verify token count is mentioned
            token_in_system = str(max_tokens) in system_prompt
            token_in_user = str(max_tokens) in user_message

            print(f"   ✅ Token count in system prompt: {token_in_system}")
            print(f"   ✅ Token count in user message: {token_in_user}")
            print(f"   ✅ Length instruction present: {'IMPORTANT' in user_message}")

            # Check for appropriate length guidance
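            # NOTE: these phrases are expected to appear verbatim in the prompt
            # that SmartAutoComplete builds for the given max_tokens bucket.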
            if max_tokens <= 100:
                expected_guidance = "concise and brief"
            elif max_tokens <= 200:
                expected_guidance = "moderate length"
            elif max_tokens <= 300:
                expected_guidance = "detailed response"
            else:
                expected_guidance = "comprehensive and detailed"

            guidance_present = expected_guidance in user_message
            print(f"   ✅ Appropriate guidance ({expected_guidance}): {guidance_present}")

            if not (token_in_system or token_in_user):
                print("   ❌ Token count not found in prompts!")
                return False

        print("\n✅ All length instruction tests passed!")
        return True

    except Exception as e:
        print(f"❌ Length instruction test failed: {str(e)}")
        import traceback
        traceback.print_exc()
        return False


def main():
    """Main test function"""
    print("🚀 Smart Auto-Complete Length Test")
    print("=" * 50)

    if test_length_instructions():
        print("\n🎉 Length instructions are working correctly!")
        print("\n💡 The AI should now generate responses of the requested length.")
        print("   - 50 tokens: ~1-2 sentences")
        print("   - 150 tokens: ~3-4 sentences")
        print("   - 300 tokens: ~1-2 paragraphs")
        print("   - 500 tokens: ~2-3 paragraphs")
    else:
        print("\n❌ Length instruction tests failed.")
        return 1

    return 0


if __name__ == "__main__":
    sys.exit(main())