"""Task Scheduler for running collection jobs."""

import asyncio
import logging
from datetime import datetime, timedelta
from typing import Any, Dict, Optional

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.interval import IntervalTrigger
from sqlalchemy import select

from app.db.session import async_session_factory
from app.models.datasource import DataSource
from app.models.task import CollectionTask
from app.services.collectors.registry import collector_registry

logger = logging.getLogger(__name__)

# Module-level singleton; jobs are keyed by DataSource.source.
scheduler = AsyncIOScheduler()


async def _update_next_run_at(datasource: DataSource, session) -> None:
    """Persist the scheduler job's next fire time onto the datasource row.

    A missing job (disabled/unscheduled collector) clears ``next_run_at``.
    Commits the session.
    """
    job = scheduler.get_job(datasource.source)
    datasource.next_run_at = job.next_run_time if job else None
    await session.commit()


async def _apply_datasource_schedule(datasource: DataSource, session) -> None:
    """Create, refresh, or remove the interval job for one datasource.

    Active datasources get an interval job (id = ``datasource.source``);
    inactive ones have any existing job removed. Updates ``next_run_at``.
    """
    collector = collector_registry.get(datasource.source)
    if not collector:
        logger.warning("Collector not found for datasource %s", datasource.source)
        return

    collector_registry.set_active(datasource.source, datasource.is_active)

    existing_job = scheduler.get_job(datasource.source)
    if existing_job:
        scheduler.remove_job(datasource.source)

    if datasource.is_active:
        scheduler.add_job(
            run_collector_task,
            # Guard against zero/negative intervals from bad config.
            trigger=IntervalTrigger(minutes=max(1, datasource.frequency_minutes)),
            id=datasource.source,
            name=datasource.name,
            replace_existing=True,
            kwargs={"collector_name": datasource.source},
        )
        logger.info(
            "Scheduled collector: %s (every %sm)",
            datasource.source,
            datasource.frequency_minutes,
        )
    else:
        logger.info("Collector disabled: %s", datasource.source)

    await _update_next_run_at(datasource, session)


async def run_collector_task(collector_name: str) -> None:
    """Run a single collector task and record the outcome on its datasource.

    Looks up the datasource by ``source`` name; skips missing or inactive
    datasources. On success records ``last_run_at``/``last_status`` and the
    next fire time; on failure records ``last_status = "failed"``.
    """
    collector = collector_registry.get(collector_name)
    if not collector:
        logger.error("Collector not found: %s", collector_name)
        return

    async with async_session_factory() as db:
        result = await db.execute(
            select(DataSource).where(DataSource.source == collector_name)
        )
        datasource = result.scalar_one_or_none()
        if not datasource:
            logger.error("Datasource not found for collector: %s", collector_name)
            return
        if not datasource.is_active:
            logger.info("Skipping disabled collector: %s", collector_name)
            return

        try:
            # The collector records its task rows against this datasource id.
            collector._datasource_id = datasource.id
            logger.info(
                "Running collector: %s (datasource_id=%s)",
                collector_name,
                datasource.id,
            )
            task_result = await collector.run(db)
            datasource.last_run_at = datetime.utcnow()
            datasource.last_status = task_result.get("status")
            await _update_next_run_at(datasource, db)  # commits
            logger.info("Collector %s completed: %s", collector_name, task_result)
        except Exception as exc:
            # Roll back any partial work first: after a DB error the session
            # is in a pending-rollback state and a direct commit would raise.
            await db.rollback()
            datasource.last_run_at = datetime.utcnow()
            datasource.last_status = "failed"
            await db.commit()
            logger.exception("Collector %s failed: %s", collector_name, exc)


async def cleanup_stale_running_tasks(max_age_hours: int = 2) -> int:
    """Mark stale running tasks as failed after restarts or collector hangs.

    A task is stale when it is still ``running`` and started more than
    ``max_age_hours`` ago. Returns the number of tasks updated.
    """
    cutoff = datetime.utcnow() - timedelta(hours=max_age_hours)
    async with async_session_factory() as db:
        result = await db.execute(
            select(CollectionTask).where(
                CollectionTask.status == "running",
                CollectionTask.started_at.is_not(None),
                CollectionTask.started_at < cutoff,
            )
        )
        stale_tasks = result.scalars().all()
        for task in stale_tasks:
            task.status = "failed"
            task.phase = "failed"
            task.completed_at = datetime.utcnow()
            # Preserve any error the collector already recorded.
            existing_error = (task.error_message or "").strip()
            cleanup_error = "Marked failed automatically after stale running task cleanup"
            task.error_message = (
                f"{existing_error}\n{cleanup_error}" if existing_error else cleanup_error
            )
        if stale_tasks:
            await db.commit()
            logger.warning(
                "Cleaned up %s stale running collection task(s)", len(stale_tasks)
            )
        return len(stale_tasks)


def start_scheduler() -> None:
    """Start the scheduler if it is not already running."""
    if not scheduler.running:
        scheduler.start()
        logger.info("Scheduler started")


def stop_scheduler() -> None:
    """Stop the scheduler without waiting for running jobs to finish."""
    if scheduler.running:
        scheduler.shutdown(wait=False)
        logger.info("Scheduler stopped")


async def sync_scheduler_with_datasources() -> None:
    """Synchronize scheduler jobs with datasource table.

    Removes jobs whose datasource no longer exists, then (re)applies the
    schedule for every configured datasource.
    """
    async with async_session_factory() as db:
        result = await db.execute(select(DataSource).order_by(DataSource.id))
        datasources = result.scalars().all()

        # Drop orphaned jobs first so stale collectors stop firing.
        configured_sources = {datasource.source for datasource in datasources}
        for job in list(scheduler.get_jobs()):
            if job.id not in configured_sources:
                scheduler.remove_job(job.id)

        for datasource in datasources:
            await _apply_datasource_schedule(datasource, db)


async def sync_datasource_job(datasource_id: int) -> bool:
    """Synchronize a single datasource job after settings changes.

    Returns False when the datasource does not exist, True otherwise.
    """
    async with async_session_factory() as db:
        datasource = await db.get(DataSource, datasource_id)
        if not datasource:
            return False
        await _apply_datasource_schedule(datasource, db)
        return True


def get_scheduler_jobs() -> list[Dict[str, Any]]:
    """Return a JSON-serializable summary of all scheduled jobs."""
    return [
        {
            "id": job.id,
            "name": job.name,
            "next_run_time": job.next_run_time.isoformat() if job.next_run_time else None,
            "trigger": str(job.trigger),
        }
        for job in scheduler.get_jobs()
    ]


async def get_latest_task_id_for_datasource(datasource_id: int) -> Optional[int]:
    """Return the id of the most recent collection task for a datasource.

    Ordered by ``created_at`` then ``id`` so ties break deterministically.
    Returns None when the datasource has no tasks.
    """
    async with async_session_factory() as db:
        result = await db.execute(
            select(CollectionTask.id)
            .where(CollectionTask.datasource_id == datasource_id)
            .order_by(CollectionTask.created_at.desc(), CollectionTask.id.desc())
            .limit(1)
        )
        return result.scalar_one_or_none()


def run_collector_now(collector_name: str) -> bool:
    """Run a collector immediately (not scheduled).

    Returns True when the background task was created. Requires a running
    event loop (e.g. called from an async web handler's thread).
    """
    collector = collector_registry.get(collector_name)
    if not collector:
        logger.error("Collector not found: %s", collector_name)
        return False
    try:
        asyncio.create_task(run_collector_task(collector_name))
        logger.info("Triggered collector: %s", collector_name)
        return True
    except Exception as exc:
        logger.error("Failed to trigger collector %s: %s", collector_name, exc)
        return False