Spaces:
Sleeping
Sleeping
File size: 9,330 Bytes
15c7d5c 0f58622 cc2fcc8 0f58622 8b25124 15c7d5c dc7e417 15c7d5c 0f58622 15c7d5c e9780b1 15c7d5c 0f58622 e9780b1 cc2fcc8 e9780b1 cc2fcc8 e9780b1 cc2fcc8 0f58622 15c7d5c 0f58622 15c7d5c 0f58622 e9780b1 0f58622 cc2fcc8 0f58622 e9780b1 0f58622 15c7d5c 0f58622 15c7d5c 0f58622 15c7d5c e9780b1 0f58622 15c7d5c 0f58622 cc2fcc8 15c7d5c 0f58622 e9780b1 15c7d5c 0f58622 15c7d5c 0f58622 cc2fcc8 0f58622 15c7d5c 0f58622 e9780b1 0f58622 15c7d5c 0f58622 cc2fcc8 15c7d5c 0f58622 e9780b1 15c7d5c 0f58622 15c7d5c 0f58622 e9780b1 0f58622 e9780b1 cc2fcc8 15c7d5c e9780b1 cc2fcc8 0f58622 e9780b1 cc2fcc8 15c7d5c cc2fcc8 0f58622 e9780b1 cc2fcc8 15c7d5c 0f58622 e9780b1 0f58622 dc7e417 e9780b1 0f58622 e9780b1 15c7d5c e9780b1 0f58622 e9780b1 0f58622 e9780b1 15c7d5c 0f58622 cc2fcc8 0f58622 dc7e417 e9780b1 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 |
from typing import Dict, List
from urllib.parse import quote_plus
import asyncio
import re

import aiohttp
import torch
from bs4 import BeautifulSoup
from sentence_transformers import SentenceTransformer, util
class DynamicRecommender:
    """Gift/product recommender.

    Maps free-form user text onto a fixed set of candidate categories via
    sentence-embedding cosine similarity, then scrapes the Amazon.in,
    Flipkart and IGP search pages for products in the top-matching
    categories.
    """

    def __init__(self):
        # Desktop-browser User-Agent so the retail sites serve full HTML
        # rather than a bot-blocking page.
        self.headers = {
            'User-Agent': (
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                'AppleWebKit/537.36 (KHTML, like Gecko) '
                'Chrome/100.0.4896.75 Safari/537.36'
            )
        }
        # Load SentenceTransformer for embedding-based recommendations.
        self.model = SentenceTransformer('all-mpnet-base-v2')
        # Pre-define broad candidate categories. Adjust to your needs.
        self.candidate_categories = [
            "tech gadgets",
            "programming books",
            "self help books",
            "business books",
            "leadership novels",
            "fashion accessories",
            "beauty products",
            "board games",
            "music instruments",
            "cooking utensils",
            "cookbooks",
            "art and painting supplies",  # covers user "art" interest
            "home decor",
            "pet supplies",
            "novels",
            "gaming consoles",
            "smartphones",
            "camera gear",
            "toys",
            "gift hamper",
        ]
        # Pre-encode category texts once; each request then only embeds the
        # user text and does a cosine-similarity lookup.
        self.category_embeddings = self.model.encode(
            self.candidate_categories, convert_to_tensor=True
        )

    # ------------------------------------------------------------------
    # Shared HTTP / parsing helpers
    # ------------------------------------------------------------------
    async def _fetch_html(self, url: str) -> str:
        """GET *url* with the browser headers; return the body on HTTP 200,
        otherwise an empty string."""
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=self.headers) as response:
                if response.status == 200:
                    return await response.text()
        return ""

    def _parse_items(
        self,
        html: str,
        item_sel: str,
        name_sel: str,
        price_sel: str,
        link_sel: str,
        source: str,
        base_url: str,
    ) -> List[Dict]:
        """Generic product-card scraper shared by all three sites.

        Selects cards with *item_sel*, extracts name/price/link with the
        given selectors, and returns at most 5 product dicts. Cards missing
        any of the three elements are skipped. (Selectors may need tweaking
        whenever a site changes its HTML.)
        """
        soup = BeautifulSoup(html, 'html.parser')
        products: List[Dict] = []
        for item in soup.select(item_sel):
            try:
                name_elem = item.select_one(name_sel)
                price_elem = item.select_one(price_sel)
                link_elem = item.select_one(link_sel)
                if name_elem and price_elem and link_elem:
                    product_name = name_elem.get_text(strip=True)
                    products.append({
                        'name': product_name,
                        'price': price_elem.get_text(strip=True),
                        'source': source,
                        'url': base_url + link_elem.get('href'),
                        'description': f"From {source}: {product_name}",
                    })
            except Exception:
                # Best-effort scraping: one malformed card must not abort
                # the whole result list.
                continue
        return products[:5]

    # ------------------------------------------------------------------
    # Amazon search
    # ------------------------------------------------------------------
    async def search_amazon(self, query: str) -> List[Dict]:
        """Search Amazon.in for *query*; return up to 5 product dicts."""
        print(f"Searching Amazon for: {query}")
        # quote_plus: queries contain spaces ("tech gadgets for 25 year old").
        html = await self._fetch_html(
            f"https://www.amazon.in/s?k={quote_plus(query)}"
        )
        return self._parse_amazon_results(html) if html else []

    def _parse_amazon_results(self, html: str) -> List[Dict]:
        """Parse an Amazon search-results page."""
        return self._parse_items(
            html,
            item_sel='.s-result-item',
            name_sel='.a-text-normal',
            price_sel='.a-price-whole',
            link_sel='a.a-link-normal',
            source='Amazon',
            base_url='https://www.amazon.in',
        )

    # ------------------------------------------------------------------
    # Flipkart search
    # ------------------------------------------------------------------
    async def search_flipkart(self, query: str) -> List[Dict]:
        """Search Flipkart for *query*; return up to 5 product dicts."""
        print(f"Searching Flipkart for: {query}")
        html = await self._fetch_html(
            f"https://www.flipkart.com/search?q={quote_plus(query)}"
        )
        return self._parse_flipkart_results(html) if html else []

    def _parse_flipkart_results(self, html: str) -> List[Dict]:
        """Parse a Flipkart search-results page."""
        return self._parse_items(
            html,
            item_sel='._1AtVbE',
            name_sel='._4rR01T',
            price_sel='._30jeq3',
            link_sel='a',
            source='Flipkart',
            base_url='https://www.flipkart.com',
        )

    # ------------------------------------------------------------------
    # IGP search
    # ------------------------------------------------------------------
    async def search_igp(self, query: str) -> List[Dict]:
        """Search IGP for *query*; return up to 5 product dicts."""
        print(f"Searching IGP for: {query}")
        html = await self._fetch_html(
            f"https://www.igp.com/search/{quote_plus(query)}"
        )
        return self._parse_igp_results(html) if html else []

    def _parse_igp_results(self, html: str) -> List[Dict]:
        """Parse an IGP search-results page."""
        return self._parse_items(
            html,
            item_sel='.product-item',
            name_sel='.product-title',
            price_sel='.product-price',
            link_sel='a',
            source='IGP',
            base_url='https://www.igp.com',
        )

    # ------------------------------------------------------------------
    # Embedding-based category extraction
    # ------------------------------------------------------------------
    def _extract_categories(self, text: str) -> List[str]:
        """Return the top-3 candidate categories most similar to *text*.

        If the text mentions an age ("age 25"), it is appended to each
        category string to bias the product searches.
        """
        # 1. Check for an explicit age with a regex.
        age_match = re.search(r'age\s+(\d+)', text.lower())
        age = age_match.group(1) if age_match else None
        # 2. Encode the user text.
        user_emb = self.model.encode(text, convert_to_tensor=True)
        # 3. Cosine similarity with the pre-encoded candidate categories.
        sims = util.cos_sim(user_emb, self.category_embeddings)[0]
        top_k = min(3, len(self.candidate_categories))  # pick top 3
        top_results = torch.topk(sims, k=top_k)
        best_categories = []
        for idx in top_results.indices:
            # .item(): index with a plain int rather than a 0-dim tensor.
            cat_text = self.candidate_categories[idx.item()]
            if age:
                cat_text = f"{cat_text} for {age} year old"
            best_categories.append(cat_text)
        print("Top categories chosen via embeddings:", best_categories)
        return best_categories

    # ------------------------------------------------------------------
    # Main recommendations
    # ------------------------------------------------------------------
    async def get_recommendations(self, text: str) -> List[Dict]:
        """
        Search across Amazon, Flipkart, IGP based on top embedding matches,
        then deduplicate, then return the final list (at most 5 products).
        """
        try:
            # 1) Get top matching categories from user text.
            categories = self._extract_categories(text)
            # 2) Run every site search concurrently. return_exceptions=True:
            #    one failing site/category no longer discards the rest.
            searches = [
                search(c)
                for c in categories
                for search in (
                    self.search_amazon,
                    self.search_flipkart,
                    self.search_igp,
                )
            ]
            results = await asyncio.gather(*searches, return_exceptions=True)
            all_products: List[Dict] = []
            for result in results:
                if isinstance(result, list):
                    all_products.extend(result)
            # 3) Deduplicate by product name, keeping the first occurrence.
            seen = set()
            unique_products = []
            for product in all_products:
                if product['name'] not in seen:
                    seen.add(product['name'])
                    unique_products.append(product)
            return unique_products[:5]
        except Exception as e:
            # Top-level guard: return an empty list rather than crash callers.
            print(f"Error in get_recommendations: {str(e)}")
            return []