#!/usr/bin/env python3
"""
Word Similarity Engine using Dictionary Embeddings

Reads a dictionary of words from a CSV file, embeds every word with a
sentence-transformer model, and provides cosine-similarity search over the
vocabulary.
"""

import os
import csv
import numpy as np
from typing import List, Optional, Tuple
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity


class WordSimilarityEngine:
    def __init__(self, cache_dir: Optional[str] = None):
        """Initialize the word similarity engine.

        Args:
            cache_dir: Directory in which to cache the embedding model.
                Defaults to a 'model_cache' directory next to this file.
        """
        if cache_dir is None:
            cache_dir = os.path.join(os.path.dirname(__file__), 'model_cache')
        
        self.cache_dir = cache_dir
        os.makedirs(cache_dir, exist_ok=True)
        
        # Load embedding model with caching
        print("Loading embedding model...")
        self.model = SentenceTransformer(
            'sentence-transformers/all-mpnet-base-v2',
            cache_folder=cache_dir
        )
        print("Model loaded successfully.")
        
        # Load dictionary and create embeddings
        self.words = self._load_dictionary()
        print(f"Loaded {len(self.words)} words from dictionary.")
        
        print("Creating embeddings for all words...")
        self.embeddings = self._create_embeddings()
        print("Embeddings created successfully.")
    
    def _load_dictionary(self) -> List[str]:
        """Load words from the dictionary CSV file."""
        dict_path = os.path.join(os.path.dirname(__file__), 'dict-words', 'dict.csv')
        words = []
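
        # Expected CSV layout (an illustrative sample; only the 'word' column
        # is actually required by the reader below):
        #
        #   word,definition
        #   aardvark,a nocturnal burrowing mammal
        #   abacus,a counting frame with sliding beads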
        
        try:
            with open(dict_path, 'r', encoding='utf-8') as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    word = row['word'].strip().lower()
                    if word and len(word) > 1:  # Include words with 2+ characters
                        words.append(word)
        except FileNotFoundError:
            raise FileNotFoundError(f"Dictionary file not found: {dict_path}")
        except Exception as e:
            raise RuntimeError(f"Error reading dictionary: {e}") from e
        
        return words
    
    def _create_embeddings(self) -> np.ndarray:
        """Create embeddings for all dictionary words."""
        # Create embeddings in batches for efficiency
        batch_size = 256
        all_embeddings = []
        
        for i in range(0, len(self.words), batch_size):
            batch_words = self.words[i:i + batch_size]
            batch_embeddings = self.model.encode(
                batch_words,
                convert_to_tensor=False,
                show_progress_bar=(i == 0)
            )
            all_embeddings.append(batch_embeddings)
        
        return np.vstack(all_embeddings)
    
    def find_similar_words(self, word: str, top_k: int = 10, min_similarity: float = 0.3) -> List[Tuple[str, float]]:
        """Find words similar to the input word.
        
        Args:
            word: Input word to find similarities for
            top_k: Number of similar words to return
            min_similarity: Minimum similarity threshold
            
        Returns:
            List of tuples (word, similarity_score) sorted by similarity
        """
        word = word.strip().lower()
        
        # Check if word exists in our dictionary
        if word not in self.words:
            print(f"Warning: '{word}' not found in dictionary. Computing similarity anyway...")
        
        # Get embedding for input word
        input_embedding = self.model.encode([word])
        
        # Compute similarities with all dictionary words
        similarities = cosine_similarity(input_embedding, self.embeddings)[0]
        
        # Get indices of most similar words
        similar_indices = np.argsort(similarities)[::-1]
        
        # Filter and format results
        results = []
        for idx in similar_indices:
            similarity_score = float(similarities[idx])
            similar_word = self.words[idx]

            # Indices are sorted by descending similarity, so once the score
            # drops below the threshold no later candidate can qualify.
            if similarity_score < min_similarity:
                break

            # Skip the input word itself
            if similar_word != word:
                results.append((similar_word, similarity_score))

                if len(results) >= top_k:
                    break
        
        return results
    
    def get_word_embedding(self, word: str) -> np.ndarray:
        """Get embedding for a specific word."""
        return self.model.encode([word.strip().lower()])[0]
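
    # Illustrative example (not part of the class API): embeddings from
    # get_word_embedding can be compared directly with cosine similarity, e.g.
    #
    #   e1 = engine.get_word_embedding("river")
    #   e2 = engine.get_word_embedding("stream")
    #   sim = float(np.dot(e1, e2) / (np.linalg.norm(e1) * np.linalg.norm(e2)))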


def main():
    """Demo the word similarity functionality."""
    # Initialize the engine
    engine = WordSimilarityEngine()
    
    # Test words
    test_words = ["cat", "science", "computer", "ocean", "music"]
    
    print("\n" + "="*60)
    print("WORD SIMILARITY DEMO")
    print("="*60)
    
    for test_word in test_words:
        print(f"\nWords similar to '{test_word}':")
        print("-" * 30)
        
        similar_words = engine.find_similar_words(test_word, top_k=8)
        
        if similar_words:
            for word, score in similar_words:
                print(f"  {word:<15} (similarity: {score:.3f})")
        else:
            print("  No similar words found.")
    
    # Interactive mode
    print("\n" + "="*60)
    print("INTERACTIVE MODE (type 'quit' to exit)")
    print("="*60)
    
    while True:
        try:
            user_word = input("\nEnter a word to find similar words: ").strip()
            
            if user_word.lower() == 'quit':
                break
            
            if not user_word:
                continue
            
            print(f"\nWords similar to '{user_word}':")
            print("-" * 30)
            
            similar_words = engine.find_similar_words(user_word, top_k=50)
            
            if similar_words:
                for word, score in similar_words:
                    print(f"  {word:<15} (similarity: {score:.3f})")
            else:
                print("  No similar words found.")
                
        except KeyboardInterrupt:
            break
        except Exception as e:
            print(f"Error: {e}")
    
    print("\nGoodbye!")


if __name__ == "__main__":
    main()