#!/usr/bin/env python3
"""
Unified Thematic Word Generator using WordFreq + SentenceTransformers

Eliminates vocabulary redundancy by using WordFreq as the single vocabulary source
for both word lists and frequency data, with all-mpnet-base-v2 for embeddings.

Features:
- Single vocabulary source (WordFreq 319K words vs previous 3 separate sources)
- Unified filtering for crossword-suitable words
- 10-tier frequency classification system
- Compatible with crossword backend services
- Comprehensive modern vocabulary with proper frequency data
"""

import os
import csv
import pickle
import numpy as np
import logging
import asyncio
from typing import List, Tuple, Optional, Dict, Set, Any
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans
from datetime import datetime
import time
from collections import Counter
from pathlib import Path

# WordFreq imports (assumed to be available)
from wordfreq import word_frequency, zipf_frequency, top_n_list

# Set up logging with filename and line numbers
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)


def get_timestamp():
    return datetime.now().strftime("%H:%M:%S")


def get_datetimestamp():
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")


class VocabularyManager:
    """
    Centralized vocabulary management using WordFreq as the single source.
    Handles loading, filtering, caching, and frequency data generation.
    """

    def __init__(self, cache_dir: Optional[str] = None, vocab_size_limit: Optional[int] = None):
        """Initialize vocabulary manager.

        Args:
            cache_dir: Directory for caching vocabulary and embeddings
            vocab_size_limit: Maximum vocabulary size (None for full WordFreq vocabulary)
        """
        if cache_dir is None:
            cache_dir = os.path.join(os.path.dirname(__file__), 'model_cache')
        self.cache_dir = Path(cache_dir)
        self.cache_dir.mkdir(exist_ok=True)

        # Vocabulary size configuration
        self.vocab_size_limit = vocab_size_limit or int(os.getenv("MAX_VOCABULARY_SIZE", "100000"))

        # Cache paths
        self.vocab_cache_path = self.cache_dir / f"unified_vocabulary_{self.vocab_size_limit}.pkl"
        self.frequency_cache_path = self.cache_dir / f"unified_frequencies_{self.vocab_size_limit}.pkl"

        # Loaded data
        self.vocabulary: List[str] = []
        self.word_frequencies: Counter = Counter()
        self.is_loaded = False

    def load_vocabulary(self) -> Tuple[List[str], Counter]:
        """Load vocabulary and frequency data, with caching."""
        if self.is_loaded:
            return self.vocabulary, self.word_frequencies

        # Try loading from cache
        if self._load_from_cache():
            logger.info(f"āœ… Loaded vocabulary from cache: {len(self.vocabulary):,} words")
            self.is_loaded = True
            return self.vocabulary, self.word_frequencies

        # Generate from WordFreq
        logger.info("šŸ”„ Generating vocabulary from WordFreq...")
        self._generate_vocabulary_from_wordfreq()

        # Save to cache
        self._save_to_cache()

        self.is_loaded = True
        return self.vocabulary, self.word_frequencies

    def _load_from_cache(self) -> bool:
        """Load vocabulary and frequencies from cache."""
        try:
            if self.vocab_cache_path.exists() and self.frequency_cache_path.exists():
                logger.info("šŸ“¦ Loading vocabulary from cache...")
                with open(self.vocab_cache_path, 'rb') as f:
                    self.vocabulary = pickle.load(f)
                with open(self.frequency_cache_path, 'rb') as f:
                    self.word_frequencies = pickle.load(f)
                return True
        except Exception as e:
            logger.warning(f"āš ļø Cache loading failed: {e}")
        return False
    def _save_to_cache(self):
        """Save vocabulary and frequencies to cache."""
        try:
            logger.info("šŸ’¾ Saving vocabulary to cache...")
            with open(self.vocab_cache_path, 'wb') as f:
                pickle.dump(self.vocabulary, f)
            with open(self.frequency_cache_path, 'wb') as f:
                pickle.dump(self.word_frequencies, f)
            logger.info("āœ… Vocabulary cached successfully")
        except Exception as e:
            logger.warning(f"āš ļø Cache saving failed: {e}")

    def _generate_vocabulary_from_wordfreq(self):
        """Generate filtered vocabulary from WordFreq database."""
        logger.info(f"šŸ“š Fetching top {self.vocab_size_limit:,} words from WordFreq...")

        # Get comprehensive word list from WordFreq (fetch extra to allow for filtering)
        raw_words = top_n_list('en', self.vocab_size_limit * 2, wordlist='large')
        logger.info(f"šŸ“„ Retrieved {len(raw_words):,} raw words from WordFreq")

        # Apply crossword-suitable filtering
        filtered_words = []
        frequency_data = Counter()

        logger.info("šŸ” Applying crossword filtering...")
        for word in raw_words:
            if self._is_crossword_suitable(word):
                filtered_words.append(word.lower())

                # Get frequency data
                try:
                    freq = word_frequency(word, 'en', wordlist='large')
                    if freq > 0:
                        # Scale frequency to preserve precision
                        frequency_data[word.lower()] = int(freq * 1e9)
                except Exception:
                    frequency_data[word.lower()] = 1  # Minimal frequency for unknown words

            if len(filtered_words) >= self.vocab_size_limit:
                break

        # Remove duplicates and sort
        self.vocabulary = sorted(set(filtered_words))
        self.word_frequencies = frequency_data

        logger.info(f"āœ… Generated filtered vocabulary: {len(self.vocabulary):,} words")
        logger.info(f"šŸ“Š Frequency data coverage: {len(self.word_frequencies):,} words")

    def _is_crossword_suitable(self, word: str) -> bool:
        """Check if word is suitable for crosswords."""
        word = word.lower().strip()

        # Length check (3-12 characters for crosswords)
        if len(word) < 3 or len(word) > 12:
            return False

        # Must be alphabetic only
        if not word.isalpha():
            return False

        # Skip boring/common words
        boring_words = {
            'the', 'and', 'for', 'are', 'but', 'not', 'you', 'all', 'this', 'that',
            'with', 'from', 'they', 'were', 'been', 'have', 'their', 'said', 'each',
            'which', 'what', 'there', 'will', 'more', 'when', 'some', 'like', 'into',
            'time', 'very', 'only', 'has', 'had', 'who', 'its', 'now', 'find', 'long',
            'down', 'day', 'did', 'get', 'come', 'made', 'may', 'part'
        }
        if word in boring_words:
            return False

        # Skip obvious plurals (simple heuristic)
        if len(word) > 4 and word.endswith('s') and not word.endswith(('ss', 'us', 'is')):
            return False

        # Skip words with too few distinct characters (often not real words)
        if len(set(word)) < len(word) * 0.6:  # Less than 60% unique characters
            return False

        return True
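
# Illustrative sketch (not part of the original API): the vocabulary layer can be
# exercised on its own, e.g. to inspect filtering results before paying the embedding
# cost. The limit of 10,000 below is an arbitrary example value.
def _preview_vocabulary(limit: int = 10_000) -> None:
    manager = VocabularyManager(vocab_size_limit=limit)
    words, frequencies = manager.load_vocabulary()
    print(f"{len(words):,} crossword-suitable words loaded")
    for word, scaled_freq in frequencies.most_common(5):
        print(f"  {word}: scaled frequency {scaled_freq:,}")
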
class UnifiedThematicWordGenerator:
    """
    Unified thematic word generator using WordFreq vocabulary and all-mpnet-base-v2 embeddings.

    Compatible with both hack tools and crossword backend services.
    Eliminates vocabulary redundancy by using a single source for everything.
    """

    def __init__(self,
                 cache_dir: Optional[str] = None,
                 model_name: str = 'all-mpnet-base-v2',
                 vocab_size_limit: Optional[int] = None):
        """Initialize the unified thematic word generator.

        Args:
            cache_dir: Directory to cache model and embeddings
            model_name: Sentence transformer model to use
            vocab_size_limit: Maximum vocabulary size (None for 100K default)
        """
        if cache_dir is None:
            cache_dir = os.path.join(os.path.dirname(__file__), 'model_cache')
        self.cache_dir = Path(cache_dir)
        self.cache_dir.mkdir(exist_ok=True)

        self.model_name = model_name
        self.vocab_size_limit = vocab_size_limit

        # Core components
        self.vocab_manager = VocabularyManager(cache_dir, vocab_size_limit)
        self.model: Optional[SentenceTransformer] = None

        # Loaded data
        self.vocabulary: List[str] = []
        self.word_frequencies: Counter = Counter()
        self.vocab_embeddings: Optional[np.ndarray] = None
        self.frequency_tiers: Dict[str, str] = {}
        self.tier_descriptions: Dict[str, str] = {}

        # Cache paths for embeddings
        vocab_hash = f"{model_name}_{vocab_size_limit or 100000}"
        self.embeddings_cache_path = self.cache_dir / f"unified_embeddings_{vocab_hash}.npy"

        self.is_initialized = False

    def initialize(self):
        """Initialize the generator (synchronous version)."""
        if self.is_initialized:
            return

        start_time = time.time()
        logger.info("šŸš€ Initializing Unified Thematic Word Generator...")

        # Load vocabulary and frequency data
        self.vocabulary, self.word_frequencies = self.vocab_manager.load_vocabulary()

        # Load or create frequency tiers
        self.frequency_tiers = self._create_frequency_tiers()

        # Load model
        logger.info(f"šŸ¤– Loading embedding model: {self.model_name}")
        model_start = time.time()
        self.model = SentenceTransformer(
            f'sentence-transformers/{self.model_name}',
            cache_folder=str(self.cache_dir)
        )
        model_time = time.time() - model_start
        logger.info(f"āœ… Model loaded in {model_time:.2f}s")

        # Load or create embeddings
        self.vocab_embeddings = self._load_or_create_embeddings()

        self.is_initialized = True
        total_time = time.time() - start_time
        logger.info(f"šŸŽ‰ Unified generator initialized in {total_time:.2f}s")
        logger.info(f"šŸ“Š Vocabulary: {len(self.vocabulary):,} words")
        logger.info(f"šŸ“ˆ Frequency data: {len(self.word_frequencies):,} words")

    async def initialize_async(self):
        """Initialize the generator (async version for backend compatibility)."""
        return self.initialize()  # For now, same as sync version
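
    # Cache layout note (derived from the paths defined above): a fully initialized
    # generator leaves the following artifacts in cache_dir, so subsequent startups
    # skip the expensive WordFreq filtering and embedding passes:
    #   unified_vocabulary_<size>.pkl          - filtered word list (VocabularyManager)
    #   unified_frequencies_<size>.pkl         - scaled frequency Counter (VocabularyManager)
    #   unified_embeddings_<model>_<size>.npy  - vocabulary embedding matrix
    # plus the SentenceTransformer model files downloaded into the same cache folder.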
    def _load_or_create_embeddings(self) -> np.ndarray:
        """Load embeddings from cache or create them."""
        # Try loading from cache
        if self.embeddings_cache_path.exists():
            try:
                logger.info("šŸ“¦ Loading embeddings from cache...")
                embeddings = np.load(self.embeddings_cache_path)
                logger.info(f"āœ… Loaded embeddings: {embeddings.shape}")
                return embeddings
            except Exception as e:
                logger.warning(f"āš ļø Embeddings cache loading failed: {e}")

        # Create embeddings
        logger.info("šŸ”„ Creating embeddings for vocabulary...")
        start_time = time.time()

        # Create embeddings in batches for memory efficiency
        batch_size = 512
        all_embeddings = []

        for i in range(0, len(self.vocabulary), batch_size):
            batch_words = self.vocabulary[i:i + batch_size]
            batch_embeddings = self.model.encode(
                batch_words,
                convert_to_tensor=False,
                show_progress_bar=i == 0  # Only show progress for first batch
            )
            all_embeddings.append(batch_embeddings)

            if i % (batch_size * 10) == 0:
                logger.info(f"šŸ“Š Embeddings progress: {i:,}/{len(self.vocabulary):,}")

        embeddings = np.vstack(all_embeddings)
        embedding_time = time.time() - start_time
        logger.info(f"āœ… Created embeddings in {embedding_time:.2f}s: {embeddings.shape}")

        # Save to cache
        try:
            np.save(self.embeddings_cache_path, embeddings)
            logger.info("šŸ’¾ Embeddings cached successfully")
        except Exception as e:
            logger.warning(f"āš ļø Embeddings cache saving failed: {e}")

        return embeddings

    def _create_frequency_tiers(self) -> Dict[str, str]:
        """Create 10-tier frequency classification system."""
        if not self.word_frequencies:
            return {}

        logger.info("šŸ“Š Creating frequency tiers...")
        tiers = {}

        # Calculate percentile-based thresholds for even distribution
        all_counts = list(self.word_frequencies.values())
        all_counts.sort(reverse=True)

        # Define 10 tiers with percentile-based thresholds
        tier_definitions = [
            ("tier_1_ultra_common", 0.999, "Ultra Common (Top 0.1%)"),
            ("tier_2_extremely_common", 0.995, "Extremely Common (Top 0.5%)"),
            ("tier_3_very_common", 0.99, "Very Common (Top 1%)"),
            ("tier_4_highly_common", 0.97, "Highly Common (Top 3%)"),
            ("tier_5_common", 0.92, "Common (Top 8%)"),
            ("tier_6_moderately_common", 0.85, "Moderately Common (Top 15%)"),
            ("tier_7_somewhat_uncommon", 0.70, "Somewhat Uncommon (Top 30%)"),
            ("tier_8_uncommon", 0.50, "Uncommon (Top 50%)"),
            ("tier_9_rare", 0.25, "Rare (Top 75%)"),
            ("tier_10_very_rare", 0.0, "Very Rare (Bottom 25%)")
        ]

        # Calculate actual thresholds
        thresholds = []
        for tier_name, percentile, description in tier_definitions:
            if percentile > 0:
                idx = int((1 - percentile) * len(all_counts))
                threshold = all_counts[min(idx, len(all_counts) - 1)]
            else:
                threshold = 0
            thresholds.append((tier_name, threshold, description))

        # Store descriptions
        self.tier_descriptions = {name: desc for name, _, desc in thresholds}

        # Assign tiers
        for word, count in self.word_frequencies.items():
            assigned = False
            for tier_name, threshold, description in thresholds:
                if count >= threshold:
                    tiers[word] = tier_name
                    assigned = True
                    break
            if not assigned:
                tiers[word] = "tier_10_very_rare"

        # Words not in frequency data are very rare
        for word in self.vocabulary:
            if word not in tiers:
                tiers[word] = "tier_10_very_rare"

        # Log tier distribution
        tier_counts = Counter(tiers.values())
        logger.info("āœ… Created frequency tiers:")
        for tier_name, count in sorted(tier_counts.items()):
            desc = self.tier_descriptions.get(tier_name, tier_name)
            logger.info(f"   {desc}: {count:,} words")

        return tiers
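
    # Worked example of the threshold math above (illustrative numbers): with 100,000
    # frequency counts sorted in descending order, tier_3_very_common uses
    # idx = int((1 - 0.99) * 100_000) = 1_000, so its threshold is the 1,000th-highest
    # count, and any word at or above that count lands in the top ~1% tier. Each word
    # is then checked against tiers from most to least common and takes the first one
    # it clears.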
    def generate_thematic_words(self,
                                inputs,
                                num_words: int = 20,
                                min_similarity: float = 0.3,
                                multi_theme: bool = False,
                                difficulty_tier: Optional[str] = None) -> List[Tuple[str, float, str]]:
        """Generate thematically related words from input seeds.

        Args:
            inputs: Single string, or list of words/sentences as theme seeds
            num_words: Number of words to return
            min_similarity: Minimum similarity threshold
            multi_theme: Whether to detect and use multiple themes
            difficulty_tier: Specific tier to filter by (e.g., "tier_5_common")

        Returns:
            List of (word, similarity_score, frequency_tier) tuples
        """
        if not self.is_initialized:
            self.initialize()

        logger.info(f"šŸŽÆ Generating {num_words} thematic words")

        # Handle single string input (convert to list for compatibility)
        if isinstance(inputs, str):
            inputs = [inputs]

        if not inputs:
            return []

        # Clean inputs
        clean_inputs = [inp.strip().lower() for inp in inputs if inp.strip()]
        if not clean_inputs:
            return []

        logger.info(f"šŸ“ Input themes: {clean_inputs}")
        if difficulty_tier:
            logger.info(f"šŸ“Š Filtering to tier: {self.tier_descriptions.get(difficulty_tier, difficulty_tier)}")

        # Get theme vector(s) using original logic
        # Auto-enable multi-theme for 3+ inputs (matching original behavior)
        auto_multi_theme = len(clean_inputs) > 2
        final_multi_theme = multi_theme or auto_multi_theme
        logger.info(f"šŸ” Multi-theme detection: {final_multi_theme} (auto: {auto_multi_theme}, manual: {multi_theme})")

        if final_multi_theme:
            theme_vectors = self._detect_multiple_themes(clean_inputs)
            logger.info(f"šŸ“Š Detected {len(theme_vectors)} themes")
        else:
            theme_vectors = [self._compute_theme_vector(clean_inputs)]
            logger.info("šŸ“Š Using single theme vector")

        # Collect similarities from all themes
        all_similarities = np.zeros(len(self.vocabulary))
        for theme_vector in theme_vectors:
            # Compute similarities with vocabulary
            similarities = cosine_similarity(theme_vector, self.vocab_embeddings)[0]
            all_similarities += similarities / len(theme_vectors)  # Average across themes

        logger.info("āœ… Computed semantic similarities")

        # Get top candidates
        top_indices = np.argsort(all_similarities)[::-1]

        # Filter and format results
        results = []
        input_words_set = set(clean_inputs)

        for idx in top_indices:
            if len(results) >= num_words * 3:  # Get extra candidates for filtering
                break

            similarity_score = all_similarities[idx]
            word = self.vocabulary[idx]

            # Apply filters
            if similarity_score < min_similarity:
                continue

            # Skip input words themselves
            if word.lower() in input_words_set:
                continue

            word_tier = self.frequency_tiers.get(word, "tier_10_very_rare")

            # Filter by difficulty tier if specified
            if difficulty_tier and word_tier != difficulty_tier:
                continue

            results.append((word, similarity_score, word_tier))

        # Sort by similarity and return top results
        results.sort(key=lambda x: x[1], reverse=True)
        final_results = results[:num_words]

        logger.info(f"āœ… Generated {len(final_results)} thematic words")
        return final_results

    def _compute_theme_vector(self, inputs: List[str]) -> np.ndarray:
        """Compute semantic centroid from input words/sentences."""
        logger.info(f"šŸŽÆ Computing theme vector for {len(inputs)} inputs")

        # Encode all inputs
        input_embeddings = self.model.encode(inputs, convert_to_tensor=False, show_progress_bar=False)
        logger.info(f"āœ… Encoded {len(inputs)} inputs")

        # Simple approach: average all input embeddings
        theme_vector = np.mean(input_embeddings, axis=0)
        return theme_vector.reshape(1, -1)
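
    # Usage sketch (hypothetical seed values): a caller passing three or more seeds
    # gets multi-theme clustering automatically, e.g.
    #     generator.generate_thematic_words(["cats", "dogs", "jazz"], num_words=10)
    # encodes the seeds, clusters them into up to three theme centroids below, then
    # averages each vocabulary word's cosine similarity across those centroids before
    # filtering by min_similarity and (optionally) difficulty_tier.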
    def _detect_multiple_themes(self, inputs: List[str], max_themes: int = 3) -> List[np.ndarray]:
        """Detect multiple themes using clustering."""
        if len(inputs) < 2:
            return [self._compute_theme_vector(inputs)]

        logger.info(f"šŸ” Detecting multiple themes from {len(inputs)} inputs")

        # Encode inputs
        input_embeddings = self.model.encode(inputs, convert_to_tensor=False, show_progress_bar=False)
        logger.info("āœ… Encoded inputs for clustering")

        # Determine optimal number of clusters
        n_clusters = min(max_themes, len(inputs), 3)
        logger.info(f"šŸ“Š Using {n_clusters} clusters for theme detection")

        if n_clusters == 1:
            return [np.mean(input_embeddings, axis=0).reshape(1, -1)]

        # Perform clustering
        kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
        kmeans.fit(input_embeddings)
        logger.info(f"āœ… Clustered inputs into {n_clusters} themes")

        # Return cluster centers as theme vectors
        return [center.reshape(1, -1) for center in kmeans.cluster_centers_]

    def get_tier_words(self, tier: str, limit: int = 1000) -> List[str]:
        """Get all words from a specific frequency tier.

        Args:
            tier: Frequency tier name (e.g., "tier_5_common")
            limit: Maximum number of words to return

        Returns:
            List of words in the specified tier
        """
        if not self.is_initialized:
            self.initialize()

        tier_words = [word for word, word_tier in self.frequency_tiers.items() if word_tier == tier]
        return tier_words[:limit]

    def get_word_info(self, word: str) -> Dict[str, Any]:
        """Get comprehensive information about a word.

        Args:
            word: Word to get information for

        Returns:
            Dictionary with word info including frequency, tier, etc.
        """
        if not self.is_initialized:
            self.initialize()

        word_lower = word.lower()

        info = {
            'word': word,
            'in_vocabulary': word_lower in self.vocabulary,
            'frequency': self.word_frequencies.get(word_lower, 0),
            'tier': self.frequency_tiers.get(word_lower, "tier_10_very_rare"),
            'tier_description': self.tier_descriptions.get(
                self.frequency_tiers.get(word_lower, "tier_10_very_rare"),
                "Unknown"
            )
        }

        return info
""" # Map difficulty to appropriate tier filtering difficulty_tier_map = { "easy": [ "tier_2_extremely_common", "tier_3_very_common", "tier_4_highly_common"], "medium": ["tier_4_highly_common", "tier_5_common", "tier_6_moderately_common", "tier_7_somewhat_uncommon"], "hard": ["tier_7_somewhat_uncommon", "tier_8_uncommon", "tier_9_rare"] } allowed_tiers = difficulty_tier_map.get(difficulty, difficulty_tier_map["medium"]) # Get thematic words all_results = self.generate_thematic_words( topic, num_words=max_words * 2, # Get extra for filtering min_similarity=0.3 ) # Filter by difficulty and format for backend backend_words = [] for word, similarity, tier in all_results: # Check difficulty criteria if not self._matches_backend_difficulty(word, difficulty): continue # Optional tier filtering for more precise difficulty control # (Comment out if tier filtering is too restrictive) # if tier not in allowed_tiers: # continue # Format for backend compatibility backend_word = { "word": word.upper(), # Backend expects uppercase "clue": self._generate_simple_clue(word, topic), "similarity": similarity, "tier": tier } backend_words.append(backend_word) if len(backend_words) >= max_words: break logger.info(f"šŸŽÆ Generated {len(backend_words)} words for topic '{topic}' (difficulty: {difficulty})") return backend_words def _matches_backend_difficulty(self, word: str, difficulty: str) -> bool: """Check if word matches backend difficulty criteria.""" difficulty_map = { "easy": {"min_len": 3, "max_len": 8}, "medium": {"min_len": 4, "max_len": 10}, "hard": {"min_len": 5, "max_len": 15} } criteria = difficulty_map.get(difficulty, difficulty_map["medium"]) return criteria["min_len"] <= len(word) <= criteria["max_len"] def _generate_simple_clue(self, word: str, topic: str) -> str: """Generate a simple clue for the word (backend compatibility).""" # Basic clue templates matching backend expectations word_lower = word.lower() topic_lower = topic.lower() # Topic-specific clue templates if "animal" in topic_lower: return f"{word_lower} (animal)" elif "tech" in topic_lower or "computer" in topic_lower: return f"{word_lower} (technology)" elif "science" in topic_lower: return f"{word_lower} (science)" elif "geo" in topic_lower or "place" in topic_lower: return f"{word_lower} (geography)" elif "food" in topic_lower: return f"{word_lower} (food)" else: return f"{word_lower} (related to {topic_lower})" def get_vocabulary_size(self) -> int: """Get the size of the loaded vocabulary.""" return len(self.vocabulary) def get_tier_distribution(self) -> Dict[str, int]: """Get distribution of words across frequency tiers.""" if not self.frequency_tiers: return {} tier_counts = Counter(self.frequency_tiers.values()) return dict(tier_counts) # Backwards compatibility aliases ThematicWordGenerator = UnifiedThematicWordGenerator # For existing code def main(): """Demo the unified thematic word generator.""" print("šŸš€ Unified Thematic Word Generator Demo") print("=" * 60) # Initialize generator print("šŸ”„ Initializing generator (this may take a moment)...") generator = UnifiedThematicWordGenerator(vocab_size_limit=50000) # Use smaller vocab for demo generator.initialize() # Test topics test_topics = ["cat", "science", "computer", "ocean", "music"] print("\nšŸ“Š Vocabulary Statistics:") print(f"Total vocabulary: {generator.get_vocabulary_size():,} words") print(f"Tier distribution: {generator.get_tier_distribution()}") print("\nšŸŽÆ Thematic Word Generation:") print("=" * 60) for topic in test_topics: print(f"\nTopic: '{topic}'") 
print("-" * 30) # Generate words with tier info results = generator.generate_thematic_words(topic, num_words=8) if results: for word, similarity, tier in results: tier_desc = generator.tier_descriptions.get(tier, tier) print(f" {word:<15} (sim: {similarity:.3f}, {tier_desc})") else: print(" No results found.") print("\nšŸŽÆ Tier-Specific Generation:") print("=" * 60) # Test tier-specific generation tier_results = generator.generate_thematic_words( "animal", num_words=5, difficulty_tier="tier_5_common" ) print(f"\nCommon animal words:") for word, similarity, tier in tier_results: print(f" {word:<15} (similarity: {similarity:.3f})") # Interactive mode print("\n" + "=" * 60) print("šŸŽ® INTERACTIVE MODE") print("=" * 60) print("Commands:") print(" - Generate words for single topic") print(" , , - Generate words for multiple topics (comma-separated)") print(" \"\" - Generate words from sentence theme") print(" - Generate specific number of words") print(" tier - Generate words from specific tier") print(" difficulty - Generate words by difficulty (easy/medium/hard)") print(" multi - Force multi-theme detection") print(" info - Get word information") print(" tiers - Show all available tiers") print(" stats - Show vocabulary statistics") print(" help - Show this help") print(" quit - Exit") print() print("Examples:") print(" I love animals # Single sentence theme") print(" cats, dogs, pets # Multiple topics (auto multi-theme)") print(" \"I love you, moonpie, chocolate\" # Mixed: sentence + words") print(" science, technology 15 # 15 words from multiple topics") print(" animal tier tier_5_common # Single topic, specific tier") print() print("Note: Multi-theme is automatically enabled for 3+ inputs") print() while True: try: user_input = input("šŸŽÆ Enter command: ").strip() if user_input.lower() in ['quit', 'exit', 'q']: break if not user_input: continue parts = user_input.split() if user_input.lower() == 'help': print("\nCommands:") print(" - Generate words for single topic") print(" , , - Generate words for multiple topics (comma-separated)") print(" \"\" - Generate words from sentence theme") print(" - Generate specific number of words") print(" tier - Generate words from specific tier") print(" difficulty - Generate words by difficulty (easy/medium/hard)") print(" multi - Force multi-theme detection") print(" info - Get word information") print(" tiers - Show all available tiers") print(" stats - Show vocabulary statistics") print(" help - Show this help") print(" quit - Exit") print() print("Examples:") print(" I love animals # Single sentence theme") print(" cats, dogs, pets # Multiple topics (auto multi-theme)") print(" \"I love you, moonpie, chocolate\" # Mixed: sentence + words") print(" science, technology 15 # 15 words from multiple topics") print(" animal tier tier_5_common # Single topic, specific tier") print() print("Note: Multi-theme is automatically enabled for 3+ inputs") continue elif user_input.lower() == 'stats': print(f"\nšŸ“Š Vocabulary Statistics:") print(f" Total words: {generator.get_vocabulary_size():,}") tier_dist = generator.get_tier_distribution() print(f" Tier distribution:") for tier, count in sorted(tier_dist.items()): tier_desc = generator.tier_descriptions.get(tier, tier) print(f" {tier_desc}: {count:,}") continue elif user_input.lower() == 'tiers': print(f"\nšŸŽÆ Available Frequency Tiers:") for tier_name, description in sorted(generator.tier_descriptions.items()): count = generator.get_tier_distribution().get(tier_name, 0) print(f" {tier_name}: {description} 
({count:,} words)") continue elif parts[0].lower() == 'info' and len(parts) > 1: word = parts[1] info = generator.get_word_info(word) print(f"\nšŸ“ Word Information: '{word}'") print(f" In vocabulary: {info['in_vocabulary']}") print(f" Frequency: {info['frequency']:,}") print(f" Tier: {info['tier']}") print(f" Description: {info['tier_description']}") continue # Parse input-based commands # Handle quoted strings (for sentences or multi-word themes) if user_input.startswith('"') and '"' in user_input[1:]: # Extract quoted content quote_end = user_input.index('"', 1) quoted_content = user_input[1:quote_end] remaining = user_input[quote_end + 1:].strip() # For quoted content, check if it contains commas (multiple inputs) if ',' in quoted_content: # Split on commas for multiple inputs: "sentence1, word2, sentence3" inputs = [item.strip() for item in quoted_content.split(',') if item.strip()] else: # Single sentence/phrase inputs = [quoted_content] # Parse remaining parameters remaining_parts = remaining.split() if remaining else [] else: # Handle unquoted input - look for comma separation first # Find where parameters start (look for known parameter keywords) param_keywords = ['tier', 'difficulty', 'multi'] input_end = len(parts) for i, part in enumerate(parts): if part.lower() in param_keywords or part.isdigit(): input_end = i break # Join the input parts to look for comma separation input_text = ' '.join(parts[:input_end]) remaining_parts = parts[input_end:] # Check for comma-separated inputs if ',' in input_text: # Split on commas: "word1, sentence two, word3" inputs = [item.strip() for item in input_text.split(',') if item.strip()] else: # For non-comma input, treat as single theme # Don't split on spaces - preserve as single input inputs = [input_text] if input_text.strip() else [] # Parse parameters num_words = 10 difficulty_tier = None difficulty_level = None multi_theme = False i = 0 while i < len(remaining_parts): if remaining_parts[i].lower() == 'tier' and i + 1 < len(remaining_parts): difficulty_tier = remaining_parts[i + 1] i += 2 elif remaining_parts[i].lower() == 'difficulty' and i + 1 < len(remaining_parts): difficulty_level = remaining_parts[i + 1] i += 2 elif remaining_parts[i].lower() == 'multi': multi_theme = True i += 1 elif remaining_parts[i].isdigit(): num_words = int(remaining_parts[i]) i += 1 else: i += 1 # Display what we're generating for if isinstance(inputs, str): print(f"\nšŸŽÆ Words for: '{inputs}'") else: print(f"\nšŸŽÆ Words for: {inputs}") if multi_theme: print("šŸ” Using multi-theme detection") print("-" * 50) try: if difficulty_level: # Use backend-compatible method for difficulty-based generation # Convert inputs to single topic for backend compatibility if isinstance(inputs, list): topic_for_backend = ' '.join(inputs) else: topic_for_backend = inputs import asyncio backend_results = asyncio.run(generator.find_similar_words(topic_for_backend, difficulty_level, num_words)) if backend_results: for word_data in backend_results: word = word_data['word'] tier = word_data.get('tier', 'unknown') similarity = word_data.get('similarity', 0.0) tier_desc = generator.tier_descriptions.get(tier, tier) print(f" {word.lower():<15} (sim: {similarity:.3f}, {tier_desc})") else: print(" No words found for this difficulty level.") else: # Use main generation method with full multi-input support results = generator.generate_thematic_words( inputs, num_words=num_words, difficulty_tier=difficulty_tier, multi_theme=multi_theme ) if results: # Group results by tier for sorted 
                        # Group results by tier for sorted display
                        tier_groups = {}
                        for word, similarity, tier in results:
                            if tier not in tier_groups:
                                tier_groups[tier] = []
                            tier_groups[tier].append((word, similarity))

                        # Sort tiers from most common to least common
                        tier_order = [
                            "tier_1_ultra_common", "tier_2_extremely_common", "tier_3_very_common",
                            "tier_4_highly_common", "tier_5_common", "tier_6_moderately_common",
                            "tier_7_somewhat_uncommon", "tier_8_uncommon", "tier_9_rare",
                            "tier_10_very_rare"
                        ]

                        # Display results sorted by tier
                        for tier in tier_order:
                            if tier in tier_groups:
                                tier_desc = generator.tier_descriptions.get(tier, tier)
                                print(f"\n  šŸ“Š {tier_desc}:")
                                # Sort words within tier alphabetically
                                tier_words = sorted(tier_groups[tier], key=lambda x: x[0])
                                for word, similarity in tier_words:
                                    print(f"    {word:<15} (similarity: {similarity:.3f})")
                    else:
                        print("  No words found. Try a different topic or tier.")

            except Exception as e:
                print(f"  āŒ Error generating words: {e}")

        except KeyboardInterrupt:
            print("\n\nšŸ‘‹ Interrupted by user")
            break
        except Exception as e:
            print(f"āŒ Error: {e}")
            print("Type 'help' for available commands")

    print("\nāœ… Thanks for using Unified Thematic Word Generator!")


if __name__ == "__main__":
    main()