feat: update dependencies and ignore files
Browse filesUpdate backend dependencies to latest versions and add .qwenignore file. Also, update .gitignore to ignore .bmad directory and unignore .qwen directory. Update Linkedin_poster_dev submodule to latest commit.
- .gitignore +3 -1
- Linkedin_poster_dev +1 -1
- backend/scheduler/apscheduler_service.py +82 -68
- sprint_change_proposal.md +0 -87
.gitignore
CHANGED
|
@@ -173,8 +173,10 @@ tests/
|
|
| 173 |
docker-compose.override.yml
|
| 174 |
|
| 175 |
# BMAD
|
|
|
|
| 176 |
.bmad-core/
|
| 177 |
.kilocode/
|
| 178 |
docs/
|
| 179 |
backend/tests/
|
| 180 |
-
.qwen/
|
|
|
|
|
|
| 173 |
docker-compose.override.yml
|
| 174 |
|
| 175 |
# BMAD
|
| 176 |
+
.bmad/
|
| 177 |
.bmad-core/
|
| 178 |
.kilocode/
|
| 179 |
docs/
|
| 180 |
backend/tests/
|
| 181 |
+
# .qwen/
|
| 182 |
+
.qwenignore
|
Linkedin_poster_dev
CHANGED
|
@@ -1 +1 @@
|
|
| 1 |
-
Subproject commit
|
|
|
|
| 1 |
+
Subproject commit d84516dcdeb9e85fb0d9c89c19fcc4b02d458168
|
backend/scheduler/apscheduler_service.py
CHANGED
|
@@ -23,43 +23,43 @@ logger = logging.getLogger(__name__)
|
|
| 23 |
|
| 24 |
class APSchedulerService:
|
| 25 |
"""Service for managing APScheduler tasks."""
|
| 26 |
-
|
| 27 |
def __init__(self, app=None):
|
| 28 |
self.app = app
|
| 29 |
self.scheduler = None
|
| 30 |
self.supabase_client = None
|
| 31 |
-
|
| 32 |
# Initialize scheduler if app is provided
|
| 33 |
if app is not None:
|
| 34 |
self.init_app(app)
|
| 35 |
-
|
| 36 |
def init_app(self, app):
|
| 37 |
"""Initialize the scheduler with the Flask app."""
|
| 38 |
try:
|
| 39 |
self.app = app
|
| 40 |
-
|
| 41 |
logger.info("🚀 APScheduler starting...")
|
| 42 |
-
|
| 43 |
# Initialize Supabase client
|
| 44 |
self.supabase_client = init_supabase(
|
| 45 |
app.config['SUPABASE_URL'],
|
| 46 |
app.config['SUPABASE_KEY']
|
| 47 |
)
|
| 48 |
-
|
| 49 |
# Configure job stores and executors
|
| 50 |
jobstores = {
|
| 51 |
'default': MemoryJobStore()
|
| 52 |
}
|
| 53 |
-
|
| 54 |
executors = {
|
| 55 |
'default': ThreadPoolExecutor(20),
|
| 56 |
}
|
| 57 |
-
|
| 58 |
job_defaults = {
|
| 59 |
'coalesce': False,
|
| 60 |
'max_instances': 3
|
| 61 |
}
|
| 62 |
-
|
| 63 |
# Create scheduler
|
| 64 |
self.scheduler = BackgroundScheduler(
|
| 65 |
jobstores=jobstores,
|
|
@@ -67,14 +67,14 @@ class APSchedulerService:
|
|
| 67 |
job_defaults=job_defaults,
|
| 68 |
timezone='UTC'
|
| 69 |
)
|
| 70 |
-
|
| 71 |
# Add the scheduler to the app
|
| 72 |
app.scheduler = self
|
| 73 |
-
|
| 74 |
# Start the scheduler
|
| 75 |
self.scheduler.start()
|
| 76 |
logger.info("✅ APScheduler started successfully")
|
| 77 |
-
|
| 78 |
# Add the periodic job to load schedules from database
|
| 79 |
self.scheduler.add_job(
|
| 80 |
func=self.load_schedules,
|
|
@@ -83,15 +83,15 @@ class APSchedulerService:
|
|
| 83 |
name='Load schedules from database',
|
| 84 |
replace_existing=True
|
| 85 |
)
|
| 86 |
-
|
| 87 |
# Load schedules immediately when the app starts
|
| 88 |
self.load_schedules()
|
| 89 |
-
|
| 90 |
except Exception as e:
|
| 91 |
logger.error(f"❌ APScheduler initialization failed: {str(e)}")
|
| 92 |
import traceback
|
| 93 |
logger.error(traceback.format_exc())
|
| 94 |
-
|
| 95 |
def load_schedules(self):
|
| 96 |
"""Load schedules from the database and create jobs."""
|
| 97 |
try:
|
|
@@ -100,7 +100,7 @@ class APSchedulerService:
|
|
| 100 |
if not self.supabase_client:
|
| 101 |
logger.error("❌ Supabase client not initialized")
|
| 102 |
return
|
| 103 |
-
|
| 104 |
# Fetch all schedules from Supabase
|
| 105 |
response = (
|
| 106 |
self.supabase_client
|
|
@@ -108,38 +108,38 @@ class APSchedulerService:
|
|
| 108 |
.select("*, Social_network(id_utilisateur, token, sub)")
|
| 109 |
.execute()
|
| 110 |
)
|
| 111 |
-
|
| 112 |
schedules = response.data if response.data else []
|
| 113 |
logger.info(f"📋 Found {len(schedules)} schedules in database")
|
| 114 |
-
|
| 115 |
# Remove existing scheduled jobs (except the loader job)
|
| 116 |
jobs_to_remove = []
|
| 117 |
for job in self.scheduler.get_jobs():
|
| 118 |
if job.id != 'load_schedules':
|
| 119 |
jobs_to_remove.append(job.id)
|
| 120 |
-
|
| 121 |
for job_id in jobs_to_remove:
|
| 122 |
try:
|
| 123 |
self.scheduler.remove_job(job_id)
|
| 124 |
except Exception as e:
|
| 125 |
logger.warning(f"Failed to remove job {job_id}: {str(e)}")
|
| 126 |
-
|
| 127 |
# Create jobs for each schedule
|
| 128 |
for schedule in schedules:
|
| 129 |
try:
|
| 130 |
schedule_id = schedule.get('id')
|
| 131 |
schedule_time = schedule.get('schedule_time')
|
| 132 |
adjusted_time = schedule.get('adjusted_time')
|
| 133 |
-
|
| 134 |
if not schedule_time or not adjusted_time:
|
| 135 |
logger.warning(f"⚠️ Invalid schedule format for schedule {schedule_id}")
|
| 136 |
continue
|
| 137 |
-
|
| 138 |
# Parse timezone information
|
| 139 |
server_timezone = get_server_timezone()
|
| 140 |
schedule_time_part, schedule_timezone = parse_timezone_schedule(schedule_time)
|
| 141 |
adjusted_time_part, adjusted_timezone = parse_timezone_schedule(adjusted_time)
|
| 142 |
-
|
| 143 |
# Convert to server timezone for APScheduler
|
| 144 |
if schedule_timezone and validate_timezone(schedule_timezone):
|
| 145 |
server_schedule_time = convert_time_to_timezone(schedule_time_part, schedule_timezone, server_timezone)
|
|
@@ -148,11 +148,11 @@ class APSchedulerService:
|
|
| 148 |
# Use original time if no valid timezone
|
| 149 |
server_schedule_time = schedule_time_part
|
| 150 |
server_adjusted_time = adjusted_time_part
|
| 151 |
-
|
| 152 |
# Parse schedule times for server timezone
|
| 153 |
content_gen_cron = self._parse_schedule_time(server_adjusted_time)
|
| 154 |
publish_cron = self._parse_schedule_time(server_schedule_time)
|
| 155 |
-
|
| 156 |
# Create content generation job (5 minutes before publishing)
|
| 157 |
gen_job_id = f"gen_{schedule_id}"
|
| 158 |
self.scheduler.add_job(
|
|
@@ -167,7 +167,7 @@ class APSchedulerService:
|
|
| 167 |
args=[schedule.get('Social_network', {}).get('id_utilisateur'), schedule_id],
|
| 168 |
replace_existing=True
|
| 169 |
)
|
| 170 |
-
|
| 171 |
# Create publishing job
|
| 172 |
pub_job_id = f"pub_{schedule_id}"
|
| 173 |
self.scheduler.add_job(
|
|
@@ -182,29 +182,29 @@ class APSchedulerService:
|
|
| 182 |
args=[schedule_id],
|
| 183 |
replace_existing=True
|
| 184 |
)
|
| 185 |
-
|
| 186 |
logger.info(f"📅 Created schedule jobs for {schedule_id}")
|
| 187 |
-
|
| 188 |
except Exception as e:
|
| 189 |
logger.error(f"❌ Error creating jobs for schedule {schedule.get('id')}: {str(e)}")
|
| 190 |
-
|
| 191 |
except Exception as e:
|
| 192 |
logger.error(f"❌ Error loading schedules: {str(e)}")
|
| 193 |
-
|
| 194 |
def _parse_schedule_time(self, schedule_time):
|
| 195 |
"""
|
| 196 |
Parse schedule time string into cron format.
|
| 197 |
-
|
| 198 |
Args:
|
| 199 |
schedule_time (str): Schedule time in format "Day HH:MM"
|
| 200 |
-
|
| 201 |
Returns:
|
| 202 |
dict: Cron parameters
|
| 203 |
"""
|
| 204 |
try:
|
| 205 |
day_name, time_str = schedule_time.split()
|
| 206 |
hour, minute = map(int, time_str.split(':'))
|
| 207 |
-
|
| 208 |
# Map day names to cron format
|
| 209 |
day_map = {
|
| 210 |
'Monday': 0,
|
|
@@ -215,7 +215,7 @@ class APSchedulerService:
|
|
| 215 |
'Saturday': 5,
|
| 216 |
'Sunday': 6
|
| 217 |
}
|
| 218 |
-
|
| 219 |
day_of_week = day_map.get(day_name, '*')
|
| 220 |
return {
|
| 221 |
'minute': minute,
|
|
@@ -230,26 +230,26 @@ class APSchedulerService:
|
|
| 230 |
'hour': '*',
|
| 231 |
'day_of_week': '*'
|
| 232 |
}
|
| 233 |
-
|
| 234 |
def generate_content_task(self, user_id: str, schedule_id: str):
|
| 235 |
"""
|
| 236 |
APScheduler task to generate content for a scheduled post.
|
| 237 |
-
|
| 238 |
Args:
|
| 239 |
user_id (str): User ID
|
| 240 |
schedule_id (str): Schedule ID
|
| 241 |
"""
|
| 242 |
try:
|
| 243 |
logger.info(f"🎨 Generating content for schedule {schedule_id}")
|
| 244 |
-
|
| 245 |
# Run within application context
|
| 246 |
with self.app.app_context():
|
| 247 |
# Initialize content service
|
| 248 |
content_service = ContentService()
|
| 249 |
-
|
| 250 |
# Generate content using content service
|
| 251 |
generated_result = content_service.generate_post_content(user_id)
|
| 252 |
-
|
| 253 |
# Ensure proper extraction of text content and image data from tuple
|
| 254 |
# ContentService.generate_post_content() always returns a tuple: (text_content, image_data)
|
| 255 |
if isinstance(generated_result, (tuple, list)) and len(generated_result) >= 1:
|
|
@@ -257,7 +257,7 @@ class APSchedulerService:
|
|
| 257 |
text_content = generated_result[0] if generated_result[0] is not None else "Generated content will appear here..."
|
| 258 |
# Extract image data (second element) if it exists
|
| 259 |
image_data = generated_result[1] if len(generated_result) >= 2 and generated_result[1] is not None else None
|
| 260 |
-
|
| 261 |
# Additional safeguard: ensure text_content is always a string, never a list/tuple
|
| 262 |
if not isinstance(text_content, str):
|
| 263 |
text_content = str(text_content) if text_content is not None else "Generated content will appear here..."
|
|
@@ -265,12 +265,12 @@ class APSchedulerService:
|
|
| 265 |
# Fallback for unexpected return types
|
| 266 |
text_content = str(generated_result) if generated_result is not None else "Generated content will appear here..."
|
| 267 |
image_data = None
|
| 268 |
-
|
| 269 |
# Final validation to ensure text_content is never stored as a list/tuple
|
| 270 |
if isinstance(text_content, (list, tuple)):
|
| 271 |
# Convert list/tuple to string representation
|
| 272 |
text_content = str(text_content)
|
| 273 |
-
|
| 274 |
# Process image data for proper storage
|
| 275 |
processed_image_data = None
|
| 276 |
if image_data is not None:
|
|
@@ -282,7 +282,7 @@ class APSchedulerService:
|
|
| 282 |
logger.error(f"❌ Error processing image data for schedule {schedule_id}: {str(e)}")
|
| 283 |
# Continue with text content even if image processing fails
|
| 284 |
processed_image_data = None
|
| 285 |
-
|
| 286 |
# Store generated content in database
|
| 287 |
# We need to get the social account ID from the schedule
|
| 288 |
schedule_response = (
|
|
@@ -292,12 +292,12 @@ class APSchedulerService:
|
|
| 292 |
.eq("id", schedule_id)
|
| 293 |
.execute()
|
| 294 |
)
|
| 295 |
-
|
| 296 |
if not schedule_response.data:
|
| 297 |
raise Exception(f"Schedule {schedule_id} not found")
|
| 298 |
-
|
| 299 |
social_account_id = schedule_response.data[0]['id_social']
|
| 300 |
-
|
| 301 |
# Prepare post data
|
| 302 |
post_data = {
|
| 303 |
"id_social": social_account_id,
|
|
@@ -305,11 +305,11 @@ class APSchedulerService:
|
|
| 305 |
"is_published": False,
|
| 306 |
"sched": schedule_id
|
| 307 |
}
|
| 308 |
-
|
| 309 |
# Add processed image data if present
|
| 310 |
if processed_image_data is not None:
|
| 311 |
post_data["image_content_url"] = processed_image_data
|
| 312 |
-
|
| 313 |
# Store the generated content
|
| 314 |
response = (
|
| 315 |
self.supabase_client
|
|
@@ -317,25 +317,25 @@ class APSchedulerService:
|
|
| 317 |
.insert(post_data)
|
| 318 |
.execute()
|
| 319 |
)
|
| 320 |
-
|
| 321 |
if response.data:
|
| 322 |
logger.info(f"✅ Content generated and stored for schedule {schedule_id}")
|
| 323 |
else:
|
| 324 |
logger.error(f"❌ Failed to store generated content for schedule {schedule_id}")
|
| 325 |
-
|
| 326 |
except Exception as e:
|
| 327 |
logger.error(f"❌ Error in content generation task for schedule {schedule_id}: {str(e)}")
|
| 328 |
-
|
| 329 |
def publish_post_task(self, schedule_id: str):
|
| 330 |
"""
|
| 331 |
APScheduler task to publish a scheduled post.
|
| 332 |
-
|
| 333 |
Args:
|
| 334 |
schedule_id (str): Schedule ID
|
| 335 |
"""
|
| 336 |
try:
|
| 337 |
logger.info(f"🚀 Publishing post for schedule {schedule_id}")
|
| 338 |
-
|
| 339 |
# Run within application context
|
| 340 |
with self.app.app_context():
|
| 341 |
# Fetch the post to publish
|
|
@@ -349,16 +349,19 @@ class APSchedulerService:
|
|
| 349 |
.limit(1)
|
| 350 |
.execute()
|
| 351 |
)
|
| 352 |
-
|
| 353 |
if not response.data:
|
| 354 |
logger.info(f"📭 No unpublished posts found for schedule {schedule_id}")
|
| 355 |
return
|
| 356 |
-
|
| 357 |
post = response.data[0]
|
| 358 |
post_id = post.get('id')
|
| 359 |
text_content = post.get('Text_content')
|
| 360 |
image_url = post.get('image_content_url')
|
| 361 |
-
|
|
|
|
|
|
|
|
|
|
| 362 |
# Get social network credentials
|
| 363 |
schedule_response = (
|
| 364 |
self.supabase_client
|
|
@@ -367,24 +370,34 @@ class APSchedulerService:
|
|
| 367 |
.eq("id", schedule_id)
|
| 368 |
.execute()
|
| 369 |
)
|
| 370 |
-
|
| 371 |
if not schedule_response.data:
|
| 372 |
-
|
| 373 |
-
|
|
|
|
| 374 |
social_network = schedule_response.data[0].get('Social_network', {})
|
| 375 |
access_token = social_network.get('token')
|
| 376 |
user_sub = social_network.get('sub')
|
| 377 |
-
|
| 378 |
if not access_token or not user_sub:
|
| 379 |
logger.error(f"❌ Missing social network credentials for schedule {schedule_id}")
|
| 380 |
return
|
| 381 |
-
|
| 382 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 383 |
linkedin_service = LinkedInService()
|
|
|
|
|
|
|
| 384 |
publish_response = linkedin_service.publish_post(
|
| 385 |
access_token, user_sub, text_content, image_url
|
| 386 |
)
|
| 387 |
-
|
|
|
|
|
|
|
| 388 |
# Update post status in database
|
| 389 |
update_response = (
|
| 390 |
self.supabase_client
|
|
@@ -393,12 +406,13 @@ class APSchedulerService:
|
|
| 393 |
.eq("id", post_id)
|
| 394 |
.execute()
|
| 395 |
)
|
| 396 |
-
|
| 397 |
-
logger.info(f"✅ Post published
|
| 398 |
-
|
| 399 |
except Exception as e:
|
| 400 |
logger.error(f"❌ Error in publishing task for schedule {schedule_id}: {str(e)}")
|
| 401 |
-
|
|
|
|
| 402 |
def trigger_immediate_update(self):
|
| 403 |
"""Trigger immediate schedule update."""
|
| 404 |
try:
|
|
@@ -408,7 +422,7 @@ class APSchedulerService:
|
|
| 408 |
except Exception as e:
|
| 409 |
logger.error(f"❌ Error triggering immediate schedule update: {str(e)}")
|
| 410 |
return False
|
| 411 |
-
|
| 412 |
def shutdown(self):
|
| 413 |
"""Shutdown the scheduler."""
|
| 414 |
if self.scheduler:
|
|
|
|
| 23 |
|
| 24 |
class APSchedulerService:
|
| 25 |
"""Service for managing APScheduler tasks."""
|
| 26 |
+
|
| 27 |
def __init__(self, app=None):
|
| 28 |
self.app = app
|
| 29 |
self.scheduler = None
|
| 30 |
self.supabase_client = None
|
| 31 |
+
|
| 32 |
# Initialize scheduler if app is provided
|
| 33 |
if app is not None:
|
| 34 |
self.init_app(app)
|
| 35 |
+
|
| 36 |
def init_app(self, app):
|
| 37 |
"""Initialize the scheduler with the Flask app."""
|
| 38 |
try:
|
| 39 |
self.app = app
|
| 40 |
+
|
| 41 |
logger.info("🚀 APScheduler starting...")
|
| 42 |
+
|
| 43 |
# Initialize Supabase client
|
| 44 |
self.supabase_client = init_supabase(
|
| 45 |
app.config['SUPABASE_URL'],
|
| 46 |
app.config['SUPABASE_KEY']
|
| 47 |
)
|
| 48 |
+
|
| 49 |
# Configure job stores and executors
|
| 50 |
jobstores = {
|
| 51 |
'default': MemoryJobStore()
|
| 52 |
}
|
| 53 |
+
|
| 54 |
executors = {
|
| 55 |
'default': ThreadPoolExecutor(20),
|
| 56 |
}
|
| 57 |
+
|
| 58 |
job_defaults = {
|
| 59 |
'coalesce': False,
|
| 60 |
'max_instances': 3
|
| 61 |
}
|
| 62 |
+
|
| 63 |
# Create scheduler
|
| 64 |
self.scheduler = BackgroundScheduler(
|
| 65 |
jobstores=jobstores,
|
|
|
|
| 67 |
job_defaults=job_defaults,
|
| 68 |
timezone='UTC'
|
| 69 |
)
|
| 70 |
+
|
| 71 |
# Add the scheduler to the app
|
| 72 |
app.scheduler = self
|
| 73 |
+
|
| 74 |
# Start the scheduler
|
| 75 |
self.scheduler.start()
|
| 76 |
logger.info("✅ APScheduler started successfully")
|
| 77 |
+
|
| 78 |
# Add the periodic job to load schedules from database
|
| 79 |
self.scheduler.add_job(
|
| 80 |
func=self.load_schedules,
|
|
|
|
| 83 |
name='Load schedules from database',
|
| 84 |
replace_existing=True
|
| 85 |
)
|
| 86 |
+
|
| 87 |
# Load schedules immediately when the app starts
|
| 88 |
self.load_schedules()
|
| 89 |
+
|
| 90 |
except Exception as e:
|
| 91 |
logger.error(f"❌ APScheduler initialization failed: {str(e)}")
|
| 92 |
import traceback
|
| 93 |
logger.error(traceback.format_exc())
|
| 94 |
+
|
| 95 |
def load_schedules(self):
|
| 96 |
"""Load schedules from the database and create jobs."""
|
| 97 |
try:
|
|
|
|
| 100 |
if not self.supabase_client:
|
| 101 |
logger.error("❌ Supabase client not initialized")
|
| 102 |
return
|
| 103 |
+
|
| 104 |
# Fetch all schedules from Supabase
|
| 105 |
response = (
|
| 106 |
self.supabase_client
|
|
|
|
| 108 |
.select("*, Social_network(id_utilisateur, token, sub)")
|
| 109 |
.execute()
|
| 110 |
)
|
| 111 |
+
|
| 112 |
schedules = response.data if response.data else []
|
| 113 |
logger.info(f"📋 Found {len(schedules)} schedules in database")
|
| 114 |
+
|
| 115 |
# Remove existing scheduled jobs (except the loader job)
|
| 116 |
jobs_to_remove = []
|
| 117 |
for job in self.scheduler.get_jobs():
|
| 118 |
if job.id != 'load_schedules':
|
| 119 |
jobs_to_remove.append(job.id)
|
| 120 |
+
|
| 121 |
for job_id in jobs_to_remove:
|
| 122 |
try:
|
| 123 |
self.scheduler.remove_job(job_id)
|
| 124 |
except Exception as e:
|
| 125 |
logger.warning(f"Failed to remove job {job_id}: {str(e)}")
|
| 126 |
+
|
| 127 |
# Create jobs for each schedule
|
| 128 |
for schedule in schedules:
|
| 129 |
try:
|
| 130 |
schedule_id = schedule.get('id')
|
| 131 |
schedule_time = schedule.get('schedule_time')
|
| 132 |
adjusted_time = schedule.get('adjusted_time')
|
| 133 |
+
|
| 134 |
if not schedule_time or not adjusted_time:
|
| 135 |
logger.warning(f"⚠️ Invalid schedule format for schedule {schedule_id}")
|
| 136 |
continue
|
| 137 |
+
|
| 138 |
# Parse timezone information
|
| 139 |
server_timezone = get_server_timezone()
|
| 140 |
schedule_time_part, schedule_timezone = parse_timezone_schedule(schedule_time)
|
| 141 |
adjusted_time_part, adjusted_timezone = parse_timezone_schedule(adjusted_time)
|
| 142 |
+
|
| 143 |
# Convert to server timezone for APScheduler
|
| 144 |
if schedule_timezone and validate_timezone(schedule_timezone):
|
| 145 |
server_schedule_time = convert_time_to_timezone(schedule_time_part, schedule_timezone, server_timezone)
|
|
|
|
| 148 |
# Use original time if no valid timezone
|
| 149 |
server_schedule_time = schedule_time_part
|
| 150 |
server_adjusted_time = adjusted_time_part
|
| 151 |
+
|
| 152 |
# Parse schedule times for server timezone
|
| 153 |
content_gen_cron = self._parse_schedule_time(server_adjusted_time)
|
| 154 |
publish_cron = self._parse_schedule_time(server_schedule_time)
|
| 155 |
+
|
| 156 |
# Create content generation job (5 minutes before publishing)
|
| 157 |
gen_job_id = f"gen_{schedule_id}"
|
| 158 |
self.scheduler.add_job(
|
|
|
|
| 167 |
args=[schedule.get('Social_network', {}).get('id_utilisateur'), schedule_id],
|
| 168 |
replace_existing=True
|
| 169 |
)
|
| 170 |
+
|
| 171 |
# Create publishing job
|
| 172 |
pub_job_id = f"pub_{schedule_id}"
|
| 173 |
self.scheduler.add_job(
|
|
|
|
| 182 |
args=[schedule_id],
|
| 183 |
replace_existing=True
|
| 184 |
)
|
| 185 |
+
|
| 186 |
logger.info(f"📅 Created schedule jobs for {schedule_id}")
|
| 187 |
+
|
| 188 |
except Exception as e:
|
| 189 |
logger.error(f"❌ Error creating jobs for schedule {schedule.get('id')}: {str(e)}")
|
| 190 |
+
|
| 191 |
except Exception as e:
|
| 192 |
logger.error(f"❌ Error loading schedules: {str(e)}")
|
| 193 |
+
|
| 194 |
def _parse_schedule_time(self, schedule_time):
|
| 195 |
"""
|
| 196 |
Parse schedule time string into cron format.
|
| 197 |
+
|
| 198 |
Args:
|
| 199 |
schedule_time (str): Schedule time in format "Day HH:MM"
|
| 200 |
+
|
| 201 |
Returns:
|
| 202 |
dict: Cron parameters
|
| 203 |
"""
|
| 204 |
try:
|
| 205 |
day_name, time_str = schedule_time.split()
|
| 206 |
hour, minute = map(int, time_str.split(':'))
|
| 207 |
+
|
| 208 |
# Map day names to cron format
|
| 209 |
day_map = {
|
| 210 |
'Monday': 0,
|
|
|
|
| 215 |
'Saturday': 5,
|
| 216 |
'Sunday': 6
|
| 217 |
}
|
| 218 |
+
|
| 219 |
day_of_week = day_map.get(day_name, '*')
|
| 220 |
return {
|
| 221 |
'minute': minute,
|
|
|
|
| 230 |
'hour': '*',
|
| 231 |
'day_of_week': '*'
|
| 232 |
}
|
| 233 |
+
|
| 234 |
def generate_content_task(self, user_id: str, schedule_id: str):
|
| 235 |
"""
|
| 236 |
APScheduler task to generate content for a scheduled post.
|
| 237 |
+
|
| 238 |
Args:
|
| 239 |
user_id (str): User ID
|
| 240 |
schedule_id (str): Schedule ID
|
| 241 |
"""
|
| 242 |
try:
|
| 243 |
logger.info(f"🎨 Generating content for schedule {schedule_id}")
|
| 244 |
+
|
| 245 |
# Run within application context
|
| 246 |
with self.app.app_context():
|
| 247 |
# Initialize content service
|
| 248 |
content_service = ContentService()
|
| 249 |
+
|
| 250 |
# Generate content using content service
|
| 251 |
generated_result = content_service.generate_post_content(user_id)
|
| 252 |
+
|
| 253 |
# Ensure proper extraction of text content and image data from tuple
|
| 254 |
# ContentService.generate_post_content() always returns a tuple: (text_content, image_data)
|
| 255 |
if isinstance(generated_result, (tuple, list)) and len(generated_result) >= 1:
|
|
|
|
| 257 |
text_content = generated_result[0] if generated_result[0] is not None else "Generated content will appear here..."
|
| 258 |
# Extract image data (second element) if it exists
|
| 259 |
image_data = generated_result[1] if len(generated_result) >= 2 and generated_result[1] is not None else None
|
| 260 |
+
|
| 261 |
# Additional safeguard: ensure text_content is always a string, never a list/tuple
|
| 262 |
if not isinstance(text_content, str):
|
| 263 |
text_content = str(text_content) if text_content is not None else "Generated content will appear here..."
|
|
|
|
| 265 |
# Fallback for unexpected return types
|
| 266 |
text_content = str(generated_result) if generated_result is not None else "Generated content will appear here..."
|
| 267 |
image_data = None
|
| 268 |
+
|
| 269 |
# Final validation to ensure text_content is never stored as a list/tuple
|
| 270 |
if isinstance(text_content, (list, tuple)):
|
| 271 |
# Convert list/tuple to string representation
|
| 272 |
text_content = str(text_content)
|
| 273 |
+
|
| 274 |
# Process image data for proper storage
|
| 275 |
processed_image_data = None
|
| 276 |
if image_data is not None:
|
|
|
|
| 282 |
logger.error(f"❌ Error processing image data for schedule {schedule_id}: {str(e)}")
|
| 283 |
# Continue with text content even if image processing fails
|
| 284 |
processed_image_data = None
|
| 285 |
+
|
| 286 |
# Store generated content in database
|
| 287 |
# We need to get the social account ID from the schedule
|
| 288 |
schedule_response = (
|
|
|
|
| 292 |
.eq("id", schedule_id)
|
| 293 |
.execute()
|
| 294 |
)
|
| 295 |
+
|
| 296 |
if not schedule_response.data:
|
| 297 |
raise Exception(f"Schedule {schedule_id} not found")
|
| 298 |
+
|
| 299 |
social_account_id = schedule_response.data[0]['id_social']
|
| 300 |
+
|
| 301 |
# Prepare post data
|
| 302 |
post_data = {
|
| 303 |
"id_social": social_account_id,
|
|
|
|
| 305 |
"is_published": False,
|
| 306 |
"sched": schedule_id
|
| 307 |
}
|
| 308 |
+
|
| 309 |
# Add processed image data if present
|
| 310 |
if processed_image_data is not None:
|
| 311 |
post_data["image_content_url"] = processed_image_data
|
| 312 |
+
|
| 313 |
# Store the generated content
|
| 314 |
response = (
|
| 315 |
self.supabase_client
|
|
|
|
| 317 |
.insert(post_data)
|
| 318 |
.execute()
|
| 319 |
)
|
| 320 |
+
|
| 321 |
if response.data:
|
| 322 |
logger.info(f"✅ Content generated and stored for schedule {schedule_id}")
|
| 323 |
else:
|
| 324 |
logger.error(f"❌ Failed to store generated content for schedule {schedule_id}")
|
| 325 |
+
|
| 326 |
except Exception as e:
|
| 327 |
logger.error(f"❌ Error in content generation task for schedule {schedule_id}: {str(e)}")
|
| 328 |
+
|
| 329 |
def publish_post_task(self, schedule_id: str):
|
| 330 |
"""
|
| 331 |
APScheduler task to publish a scheduled post.
|
| 332 |
+
|
| 333 |
Args:
|
| 334 |
schedule_id (str): Schedule ID
|
| 335 |
"""
|
| 336 |
try:
|
| 337 |
logger.info(f"🚀 Publishing post for schedule {schedule_id}")
|
| 338 |
+
|
| 339 |
# Run within application context
|
| 340 |
with self.app.app_context():
|
| 341 |
# Fetch the post to publish
|
|
|
|
| 349 |
.limit(1)
|
| 350 |
.execute()
|
| 351 |
)
|
| 352 |
+
|
| 353 |
if not response.data:
|
| 354 |
logger.info(f"📭 No unpublished posts found for schedule {schedule_id}")
|
| 355 |
return
|
| 356 |
+
|
| 357 |
post = response.data[0]
|
| 358 |
post_id = post.get('id')
|
| 359 |
text_content = post.get('Text_content')
|
| 360 |
image_url = post.get('image_content_url')
|
| 361 |
+
|
| 362 |
+
logger.info(f"📄 Post content to be published: {text_content[:100]}...") # Log first 100 chars
|
| 363 |
+
logger.info(f"🖼️ Image URL: {image_url}")
|
| 364 |
+
|
| 365 |
# Get social network credentials
|
| 366 |
schedule_response = (
|
| 367 |
self.supabase_client
|
|
|
|
| 370 |
.eq("id", schedule_id)
|
| 371 |
.execute()
|
| 372 |
)
|
| 373 |
+
|
| 374 |
if not schedule_response.data:
|
| 375 |
+
logger.error(f"❌ Schedule {schedule_id} not found in database")
|
| 376 |
+
return
|
| 377 |
+
|
| 378 |
social_network = schedule_response.data[0].get('Social_network', {})
|
| 379 |
access_token = social_network.get('token')
|
| 380 |
user_sub = social_network.get('sub')
|
| 381 |
+
|
| 382 |
if not access_token or not user_sub:
|
| 383 |
logger.error(f"❌ Missing social network credentials for schedule {schedule_id}")
|
| 384 |
return
|
| 385 |
+
|
| 386 |
+
logger.info(f"🔐 Access token exists: {bool(access_token)}")
|
| 387 |
+
logger.info(f"👤 User sub exists: {bool(user_sub)}")
|
| 388 |
+
|
| 389 |
+
# Publish to LinkedIn - ensure we're within app context
|
| 390 |
+
# Import here to ensure it has access to app context
|
| 391 |
+
from backend.services.linkedin_service import LinkedInService
|
| 392 |
linkedin_service = LinkedInService()
|
| 393 |
+
logger.info(f"🔗 LinkedIn service initialized successfully")
|
| 394 |
+
|
| 395 |
publish_response = linkedin_service.publish_post(
|
| 396 |
access_token, user_sub, text_content, image_url
|
| 397 |
)
|
| 398 |
+
|
| 399 |
+
logger.info(f"✅ LinkedIn API response received for schedule {schedule_id}")
|
| 400 |
+
|
| 401 |
# Update post status in database
|
| 402 |
update_response = (
|
| 403 |
self.supabase_client
|
|
|
|
| 406 |
.eq("id", post_id)
|
| 407 |
.execute()
|
| 408 |
)
|
| 409 |
+
|
| 410 |
+
logger.info(f"✅ Post status updated to published for schedule {schedule_id}")
|
| 411 |
+
|
| 412 |
except Exception as e:
|
| 413 |
logger.error(f"❌ Error in publishing task for schedule {schedule_id}: {str(e)}")
|
| 414 |
+
logger.error(f"Full error traceback: ", exc_info=True) # Add full traceback
|
| 415 |
+
|
| 416 |
def trigger_immediate_update(self):
|
| 417 |
"""Trigger immediate schedule update."""
|
| 418 |
try:
|
|
|
|
| 422 |
except Exception as e:
|
| 423 |
logger.error(f"❌ Error triggering immediate schedule update: {str(e)}")
|
| 424 |
return False
|
| 425 |
+
|
| 426 |
def shutdown(self):
|
| 427 |
"""Shutdown the scheduler."""
|
| 428 |
if self.scheduler:
|
sprint_change_proposal.md
DELETED
|
@@ -1,87 +0,0 @@
|
|
| 1 |
-
# Sprint Change Proposal: LinkedIn Token Expiration Management System
|
| 2 |
-
|
| 3 |
-
## Analysis Summary
|
| 4 |
-
|
| 5 |
-
**Original Issue**: LinkedIn tokens expire after 2 months, requiring manual reconnection process that creates a poor user experience.
|
| 6 |
-
|
| 7 |
-
**Impact Analysis**:
|
| 8 |
-
- Currently, the `Social_network` table already has an `expiration` column that can be leveraged
|
| 9 |
-
- No schema changes needed, only implementation of business logic
|
| 10 |
-
- Requires background task scheduling and email notification system
|
| 11 |
-
- Affects user experience for LinkedIn integration users
|
| 12 |
-
|
| 13 |
-
**Rationale for Chosen Path**: Implementing automated token refresh with 50-day cycle and daily background checks minimizes user disruption while maintaining security best practices.
|
| 14 |
-
|
| 15 |
-
## Specific Proposed Edits
|
| 16 |
-
|
| 17 |
-
### 1. Update Account Linking Process
|
| 18 |
-
|
| 19 |
-
**Current**: LinkedIn tokens are stored without expiration tracking
|
| 20 |
-
|
| 21 |
-
**Proposed Changes**:
|
| 22 |
-
- Modify the LinkedIn account linking process to automatically set the `expiration` column to 50 days after link date
|
| 23 |
-
- Add validation to ensure the `expiration` column is properly set during the connection process
|
| 24 |
-
|
| 25 |
-
### 2. Create Background Task Scheduler
|
| 26 |
-
|
| 27 |
-
**Current**: No automated expiration checks exist
|
| 28 |
-
|
| 29 |
-
**Proposed Changes**:
|
| 30 |
-
- Implement a background task that runs at noon and midnight daily
|
| 31 |
-
- Create a function to check for accounts with expiration date equal to current date
|
| 32 |
-
- Add token refresh logic for LinkedIn tokens
|
| 33 |
-
- Implement error handling and logging
|
| 34 |
-
|
| 35 |
-
### 3. Implement Token Refresh Process
|
| 36 |
-
|
| 37 |
-
**Current**: No automated refresh mechanism exists
|
| 38 |
-
|
| 39 |
-
**Proposed Changes**:
|
| 40 |
-
- Create function to perform LinkedIn token refresh using LinkedIn's API
|
| 41 |
-
- Handle successful refresh (update tokens and expiration)
|
| 42 |
-
- Handle failed refresh (send user notification)
|
| 43 |
-
|
| 44 |
-
### 4. Implement Email Notification System
|
| 45 |
-
|
| 46 |
-
**Current**: No automated notification system exists for failed refreshes
|
| 47 |
-
|
| 48 |
-
**Proposed Changes**:
|
| 49 |
-
- Create function to send email notification when refresh fails
|
| 50 |
-
- Include clear instructions for users to reconnect their LinkedIn account
|
| 51 |
-
|
| 52 |
-
### 5. Add Refresh Token Storage
|
| 53 |
-
|
| 54 |
-
**Current**: Only access token is stored in the `token` column
|
| 55 |
-
|
| 56 |
-
**Proposed Changes**:
|
| 57 |
-
- Modify the initial LinkedIn connection flow to also store the refresh token
|
| 58 |
-
- Update schema to add refresh_token column or modify existing storage approach
|
| 59 |
-
|
| 60 |
-
## Additional Implementation Considerations
|
| 61 |
-
|
| 62 |
-
### 6. Logging and Monitoring
|
| 63 |
-
- Add comprehensive logging for the refresh process
|
| 64 |
-
- Create monitoring for failed refresh attempts
|
| 65 |
-
- Log successful refreshes for tracking
|
| 66 |
-
|
| 67 |
-
### 7. Error Handling Improvements
|
| 68 |
-
- Implement retry logic for temporary failures
|
| 69 |
-
- Handle rate limiting from LinkedIn API
|
| 70 |
-
- Graceful degradation when refresh fails
|
| 71 |
-
|
| 72 |
-
### 8. Testing Requirements
|
| 73 |
-
- Unit tests for the refresh logic
|
| 74 |
-
- Integration tests for the scheduling system
|
| 75 |
-
- Testing of email notification system
|
| 76 |
-
|
| 77 |
-
## Success Metrics
|
| 78 |
-
- Reduction in manual reconnection requests
|
| 79 |
-
- Improved user retention for LinkedIn integration
|
| 80 |
-
- Decreased support tickets related to token expiration
|
| 81 |
-
|
| 82 |
-
## Implementation Timeline
|
| 83 |
-
1. **Day 1**: Implement token refresh logic and database updates
|
| 84 |
-
2. **Day 2**: Implement background scheduler and email notifications
|
| 85 |
-
3. **Week 1-2**: Testing, monitoring, and adjustments
|
| 86 |
-
|
| 87 |
-
This proposal addresses the LinkedIn token expiration issue while leveraging your existing database structure efficiently. The system will automatically handle token refresh for users, sending notifications only when automatic refresh fails, thus improving the user experience significantly.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|