from typing import Dict, List
from urllib.parse import quote_plus

import aiohttp
import asyncio
import re
import torch
from sentence_transformers import SentenceTransformer, util
from bs4 import BeautifulSoup


class DynamicRecommender:
    def __init__(self):
        self.headers = {
            'User-Agent': (
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                'AppleWebKit/537.36 (KHTML, like Gecko) '
                'Chrome/100.0.4896.75 Safari/537.36'
            )
        }
        # Load a SentenceTransformer model for embedding-based recommendations
        self.model = SentenceTransformer('all-mpnet-base-v2')

        # Pre-define broad candidate categories. Adjust these to your needs.
        self.candidate_categories = [
            "tech gadgets",
            "programming books",
            "self help books",
            "business books",
            "leadership novels",
            "fashion accessories",
            "beauty products",
            "board games",
            "music instruments",
            "cooking utensils",
            "cookbooks",
            "art and painting supplies",  # covers a user "art" interest
            "home decor",
            "pet supplies",
            "novels",
            "gaming consoles",
            "smartphones",
            "camera gear",
            "toys",
            "gift hamper",
        ]
        # Pre-encode the category texts once so each request only embeds the user text
        self.category_embeddings = self.model.encode(
            self.candidate_categories, convert_to_tensor=True
        )

    # ------------------------------------------------------------------
    # Amazon search
    # ------------------------------------------------------------------
    async def search_amazon(self, query: str) -> List[Dict]:
        print(f"Searching Amazon for: {query}")
        # URL-encode the query so multi-word categories form a valid URL
        search_url = f"https://www.amazon.in/s?k={quote_plus(query)}"
        async with aiohttp.ClientSession() as session:
            async with session.get(search_url, headers=self.headers) as response:
                if response.status == 200:
                    html = await response.text()
                    return self._parse_amazon_results(html)
        return []

    def _parse_amazon_results(self, html: str) -> List[Dict]:
        soup = BeautifulSoup(html, 'html.parser')
        products = []
        # (Selectors may need tweaking if Amazon changes its HTML)
        search_items = soup.select('.s-result-item')
        for item in search_items:
            try:
                name_elem = item.select_one('.a-text-normal')
                price_elem = item.select_one('.a-price-whole')
                link_elem = item.select_one('a.a-link-normal')
                if name_elem and price_elem and link_elem:
                    product_name = name_elem.get_text(strip=True)
                    product_price = price_elem.get_text(strip=True)
                    product_url = link_elem.get('href', '')
                    # hrefs are usually relative; prepend the domain only when needed
                    if not product_url.startswith('http'):
                        product_url = 'https://www.amazon.in' + product_url
                    products.append({
                        'name': product_name,
                        'price': product_price,
                        'source': 'Amazon',
                        'url': product_url,
                        'description': f"From Amazon: {product_name}"
                    })
            except Exception:
                continue
        return products[:5]

    # ------------------------------------------------------------------
    # Flipkart search
    # ------------------------------------------------------------------
    async def search_flipkart(self, query: str) -> List[Dict]:
        print(f"Searching Flipkart for: {query}")
        search_url = f"https://www.flipkart.com/search?q={quote_plus(query)}"
        async with aiohttp.ClientSession() as session:
            async with session.get(search_url, headers=self.headers) as response:
                if response.status == 200:
                    html = await response.text()
                    return self._parse_flipkart_results(html)
        return []

    def _parse_flipkart_results(self, html: str) -> List[Dict]:
        soup = BeautifulSoup(html, 'html.parser')
        products = []
        # (Selectors may need tweaking if Flipkart changes its HTML)
        item_cards = soup.select('._1AtVbE')
        for item in item_cards:
            try:
                name_elem = item.select_one('._4rR01T')
                price_elem = item.select_one('._30jeq3')
                link_elem = item.select_one('a')
                if name_elem and price_elem and link_elem:
                    product_name = name_elem.get_text(strip=True)
                    product_price = price_elem.get_text(strip=True)
                    product_url = link_elem.get('href', '')
                    # hrefs are usually relative; prepend the domain only when needed
                    if not product_url.startswith('http'):
                        product_url = 'https://www.flipkart.com' + product_url
                    products.append({
                        'name': product_name,
                        'price': product_price,
                        'source': 'Flipkart',
                        'url': product_url,
                        'description': f"From Flipkart: {product_name}"
                    })
            except Exception:
                continue
        return products[:5]

    # ------------------------------------------------------------------
    # IGP search
    # ------------------------------------------------------------------
    async def search_igp(self, query: str) -> List[Dict]:
        print(f"Searching IGP for: {query}")
        search_url = f"https://www.igp.com/search/{quote_plus(query)}"
        async with aiohttp.ClientSession() as session:
            async with session.get(search_url, headers=self.headers) as response:
                if response.status == 200:
                    html = await response.text()
                    return self._parse_igp_results(html)
        return []

    def _parse_igp_results(self, html: str) -> List[Dict]:
        soup = BeautifulSoup(html, 'html.parser')
        products = []
        # (Selectors will likely need tweaking if IGP changes its HTML)
        item_cards = soup.select('.product-item')
        for item in item_cards:
            try:
                name_elem = item.select_one('.product-title')
                price_elem = item.select_one('.product-price')
                link_elem = item.select_one('a')
                if name_elem and price_elem and link_elem:
                    product_name = name_elem.get_text(strip=True)
                    product_price = price_elem.get_text(strip=True)
                    product_url = link_elem.get('href', '')
                    # hrefs are usually relative; prepend the domain only when needed
                    if not product_url.startswith('http'):
                        product_url = 'https://www.igp.com' + product_url
                    products.append({
                        'name': product_name,
                        'price': product_price,
                        'source': 'IGP',
                        'url': product_url,
                        'description': f"From IGP: {product_name}"
                    })
            except Exception:
                continue
        return products[:5]

    # ------------------------------------------------------------------
    # Embedding-based category extraction
    # ------------------------------------------------------------------
    def _extract_categories(self, text: str) -> List[str]:
        # 1. Check for an age mention with a regex (e.g. "age 25")
        age_match = re.search(r'age\s+(\d+)', text.lower())
        age = age_match.group(1) if age_match else None

        # 2. Encode the user text
        user_emb = self.model.encode(text, convert_to_tensor=True)

        # 3. Cosine similarity against the candidate categories
        sims = util.cos_sim(user_emb, self.category_embeddings)[0]
        top_k = min(3, len(self.candidate_categories))  # pick the top 3
        top_results = torch.topk(sims, k=top_k)

        best_categories = []
        for idx in top_results.indices:
            # Cast the tensor index to int before indexing the Python list
            cat_text = self.candidate_categories[int(idx)]
            if age:
                cat_text = f"{cat_text} for {age} year old"
            best_categories.append(cat_text)

        print("Top categories chosen via embeddings:", best_categories)
        return best_categories

    # ------------------------------------------------------------------
    # Main recommendations
    # ------------------------------------------------------------------
    async def get_recommendations(self, text: str) -> List[Dict]:
        """
        Search Amazon, Flipkart, and IGP for the top embedding matches,
        deduplicate by product name, and return the final list.
        """
        try:
            # 1) Get the top matching categories from the user text
            categories = self._extract_categories(text)

            # 2) For each category, query the three sites concurrently
            all_products = []
            for c in categories:
                results = await asyncio.gather(
                    self.search_amazon(c),
                    self.search_flipkart(c),
                    self.search_igp(c),
                )
                for site_products in results:
                    all_products.extend(site_products)

            # 3) Deduplicate by product name
            seen = set()
            unique_products = []
            for product in all_products:
                if product['name'] not in seen:
                    seen.add(product['name'])
                    unique_products.append(product)

            return unique_products[:5]
        except Exception as e:
            print(f"Error in get_recommendations: {e}")
            return []
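

# ----------------------------------------------------------------------
# Usage sketch (an addition, not part of the original listing): the class
# exposes an async API, so it has to be driven from an event loop. The
# sample query text below is made up for illustration.
# ----------------------------------------------------------------------
if __name__ == "__main__":
    async def _demo():
        recommender = DynamicRecommender()
        products = await recommender.get_recommendations(
            "Gift for my sister, age 25, who loves art and cooking"
        )
        for product in products:
            print(f"{product['source']}: {product['name']} "
                  f"({product['price']}) -> {product['url']}")

    asyncio.run(_demo())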