from playwright.sync_api import sync_playwright, TimeoutError as PlaywrightTimeoutError
from libs.facebook_scraper import FacebookScraper
import json
import yaml
import logging
import logging.config
import os
import requests
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from utils import convert_facebook_time_to_timestamp
import datetime
import time

# Executor pool and default job settings for the scheduler.
executors = {
  'default': ThreadPoolExecutor(max_workers=3)  # worker threads shared by all scrape jobs
}

job_defaults = {
  'coalesce': True,           # collapse a backlog of missed runs into a single run
  'max_instances': 2,         # allow at most 2 concurrent instances per job
  'misfire_grace_time': 1800  # still run jobs that fire up to 30 minutes (seconds) late
}

scheduler = BackgroundScheduler(executors=executors, job_defaults=job_defaults)

# NOTE: global browser/context variables were removed; each task run now
# creates (and tears down) its own Playwright browser.

def load_config():
  """Read and parse ``config.yaml`` from the current working directory."""
  with open('config.yaml', encoding='utf-8') as fh:
    return yaml.safe_load(fh)

config = load_config()

# Pick the log directory from the deployment environment.
env = os.environ.get('ENV', 'development')

if env == 'production':
  # Production — write under the shared data volume.
  base_log_dir = '/data/facebook_scraper/logs'
else:
  # Local development — use a logs/ directory next to this file.
  base_log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logs')

# Make sure the log directory exists before logging is configured.
if not os.path.exists(base_log_dir):
  try:
    os.makedirs(base_log_dir)
    print(f"已创建日志目录: {base_log_dir}")
  except Exception as e:
    print(f"无法创建日志目录 {base_log_dir}: {e}")
    # If the preferred directory cannot be created (e.g. permissions),
    # fall back to ./logs under the current working directory.
    base_log_dir = os.path.join(os.getcwd(), 'logs')
    if not os.path.exists(base_log_dir):
      os.makedirs(base_log_dir)
    print(f"将使用备用日志目录: {base_log_dir}")

# Load the logging configuration and redirect the info-file handler's
# output file into the selected log directory before initialising logging.
with open('logging_config.yaml', 'r', encoding='utf-8') as f:
  logging_config = yaml.safe_load(f)
  filename = logging_config['handlers']['info_file']['filename']
  logging_config['handlers']['info_file']['filename'] = os.path.join(base_log_dir, filename)

  logging.config.dictConfig(logging_config)
  logger = logging.getLogger("scraper")  # operational / diagnostic messages
  info_logger = logging.getLogger("info")  # structured scrape output (JSON lines)

  logger.info(f"使用日志目录: {base_log_dir}")

# Per-task time cursors keyed by task name: the newest post / comment
# timestamp emitted so far. Items at or below the cursor are not re-emitted.
task_post_timestamps = {}
task_comment_timestamps = {}

# Fetch Facebook login cookies from the internal callback endpoint.
def get_cookies_from_api():
  """
  Retrieve cookies from the remote API and convert them into
  Playwright's cookie format (name/value/domain/path dicts).

  Returns an empty list if the request or conversion fails for any reason.
  """
  url = "https://open.feishu.cn/anycross/trigger/callback/NDRiYzFiMTg2NmIzOTIxMDNkNWJmODk3NDllZmNiNzMw"
  try:
    resp = requests.post(url, timeout=10)
    resp.raise_for_status()
    # Keep the conversion inside the try so malformed payloads
    # (missing keys, non-list JSON) also fall through to the error path.
    return [
      {
        'name': item['name'],
        'value': item['value'],
        'domain': '.facebook.com',
        'path': '/'
      }
      for item in resp.json()
    ]
  except Exception as e:
    logger.error(f"获取cookies失败: {e}")
    return []

# Bring up the scheduler and register every configured task.
def init_scheduler():
  """Start the background scheduler, fetch cookies once, register all
  tasks from the config, and run each task once immediately."""
  scheduler.start()
  global cookies
  cookies = get_cookies_from_api()
  logger.info(f"cookies: {cookies}")
  for task_cfg in config['scheduled_tasks']:
    logger.info(f"加载任务: {task_cfg}")
    add_job_to_scheduler(task_cfg)
    # Also run once right now instead of waiting for the first trigger.
    logger.info(f"立即执行任务: {task_cfg['name']}")
    run_scrape_task(task_cfg, cookies)
  logger.info("调度器已启动")

# Register (or replace) one scheduled scrape job.
def add_job_to_scheduler(task):
  """Build a cron or interval trigger from *task*'s schedule section and
  register it with the scheduler, replacing any existing job of the same id."""
  job_id = f"task_{task['name'].replace(' ', '_')}"

  # Drop a previously registered job with the same id, if any.
  if scheduler.get_job(job_id) is not None:
    scheduler.remove_job(job_id)

  schedule = task['schedule']
  if schedule['type'] == 'cron':
    trigger = CronTrigger(
      day_of_week=schedule.get('day_of_week', '*'),
      hour=schedule.get('hour', '*'),
      minute=schedule.get('minute', '*')
    )
  else:
    # Interval schedule; default to every 5 minutes when unspecified.
    hours = int(schedule.get('hours', 0))
    minutes = int(schedule.get('minutes', 0))
    if not (hours or minutes):
      minutes = 5
    trigger = IntervalTrigger(hours=hours, minutes=minutes)

  scheduler.add_job(
    run_scrape_task_with_cookies,
    trigger=trigger,
    id=job_id,
    args=[task],
    name=task['name'],
    max_instances=2,          # allow up to 2 overlapping runs of this job
    misfire_grace_time=1800,  # tolerate runs that fire up to 30 minutes late
    coalesce=True             # merge a backlog of missed runs into one
  )
  logger.info(f"已添加任务: {task['name']}")

def run_scrape_task_with_cookies(task):
  """Scheduler entry point: run *task* with the module-level cookies.

  APScheduler job args are fixed at registration time, so the cookies
  fetched in init_scheduler are looked up here via the module global
  instead of being baked into the job.

  NOTE(review): cookies are fetched once at startup and never refreshed,
  so long-running deployments may end up scraping with expired cookies —
  confirm whether a periodic refresh is needed.
  """
  # Indentation normalised to the file's 2-space convention.
  run_scrape_task(task, cookies)

# Run one scrape task end-to-end.
def run_scrape_task(task, cookies):
  """Scrape new posts and comments for one configured Facebook page.

  Launches a fresh Playwright browser, opens the page, walks each post,
  and emits every post/comment newer than the per-task time cursors as a
  JSON line via ``info_logger``. The cursors in ``task_post_timestamps``
  and ``task_comment_timestamps`` are advanced afterwards so later runs
  only emit newer content.

  Args:
    task: task definition dict from config.yaml — expects ``name``,
      ``game_id``, ``page_name``, ``limit`` and optional ``time_cursor``.
    cookies: Playwright-format cookie dicts used to authenticate.
  """
  start_time = time.time()
  task_name = task['name']

  try:
    logger.info(f"开始执行任务: {task_name} - 开始时间: {datetime.datetime.now()}")

    # First run for this task: "now" starts the post cursor at the current
    # moment (skip history), anything else starts at 0 (emit everything).
    if task_name not in task_post_timestamps:
      if task.get('time_cursor', '') == 'now':
        task_post_timestamps[task_name] = int(datetime.datetime.now().timestamp())
      else:
        task_post_timestamps[task_name] = 0

    # Same initialisation for the comment cursor.
    if task_name not in task_comment_timestamps:
      if task.get('time_cursor', '') == 'now':
        task_comment_timestamps[task_name] = int(datetime.datetime.now().timestamp())
      else:
        task_comment_timestamps[task_name] = 0

    logger.info(f"执行任务: {task_name}")
    logger.info(f"帖子时间游标: {task_post_timestamps[task_name]}")
    logger.info(f"评论时间游标: {task_comment_timestamps[task_name]}")

    game_id = task['game_id']
    page_name = task['page_name']
    limit = task['limit']

    # A fresh Playwright instance per run keeps concurrent tasks isolated.
    with sync_playwright() as playwright:
      # Fix: the computed flag was previously ignored (launch(headless=True));
      # honour it so development runs get a visible browser.
      headless = env == 'production'
      browser = playwright.chromium.launch(headless=headless)
      context = browser.new_context()
      context.add_cookies(cookies)

      scraper = FacebookScraper(page_name, logger)
      page = context.new_page()

      try:
        try:
          page.goto(f"https://www.facebook.com/{page_name}")
        except Exception as e:
          if "crashed" in str(e).lower():
            logger.error(f"任务 {task_name} 页面崩溃，停止执行: {e}")
          else:
            logger.error(f"任务 {task_name} 导航到页面失败: {e}")
          return  # cleanup happens in the finally below

        # Give the page a moment to finish rendering.
        page.wait_for_timeout(1000)

        posts = scraper.get_posts(page, limit)

        post_cursor = task_post_timestamps[task_name]
        comment_cursor = task_comment_timestamps[task_name]
        latest_post_timestamp = 0
        latest_comment_timestamp = 0

        new_posts_count = 0
        new_comments_count = 0

        for post in posts:
          try:
            page.goto(post['post_url'])
          except Exception as e:
            if "crashed" in str(e).lower():
              logger.error(f"任务 {task_name} 在访问帖子 {post['post_id']} 时页面崩溃，停止执行: {e}")
              return
            logger.error(f"任务 {task_name} 导航到帖子 {post['post_id']} 失败: {e}")
            continue  # skip this post and move on to the next one

          page.wait_for_timeout(1000)

          post_dialog_ele = page.locator('//*[@role="dialog"][@aria-labelledby]')
          post_content = scraper.find_post_content(post_dialog_ele)

          # The permalink anchor inside the dialog carries a relative time
          # label (e.g. "3小时") in aria-label; convert it to a unix timestamp.
          link_ele = post_dialog_ele.locator(f'//*[@id]//a[contains(@href, "www.facebook.com/{page_name}/posts") and not(contains(@href, "comment_id"))]')
          time_str = link_ele.get_attribute('aria-label')
          timestamp = convert_facebook_time_to_timestamp(time_str)

          latest_post_timestamp = max(latest_post_timestamp, timestamp)

          # Emit only posts newer than the cursor from the previous run.
          if timestamp > post_cursor:
            post_info = {
              "log_type": "public_opinion_forum_post",
              "source": "facebook",
              "game_id": game_id,
              "forum_name": page_name,
              "post_id": post['post_id'],
              "event_time": datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc).strftime('%Y-%m-%d %H:%M:%S'),
              "author_name": page_name,
              "post_url": post['post_url'],
              "post_content": post_content
            }
            info_logger.info(json.dumps(post_info, ensure_ascii=False))
            new_posts_count += 1

          scraper.load_all_comments(post_dialog_ele, page)
          comments = scraper.get_comments(post_dialog_ele)

          for comment in comments:
            comment_timestamp = comment['comment_time']
            latest_comment_timestamp = max(latest_comment_timestamp, comment_timestamp)

            # Emit only comments newer than the cursor from the previous run.
            if comment_timestamp > comment_cursor:
              comment_info = {
                "log_type": "public_opinion_forum_comments",
                "source": "facebook",
                "game_id": game_id,
                "forum_name": page_name,
                "post_id": post['post_id'],
                "comment_id": comment['comment_id'],
                "comment_author": comment['comment_author'],
                "comment_content": comment['comment_content'],
                "event_time": datetime.datetime.fromtimestamp(comment_timestamp, datetime.timezone.utc).strftime('%Y-%m-%d %H:%M:%S'),
              }
              info_logger.info(json.dumps(comment_info, ensure_ascii=False))
              new_comments_count += 1

        # Fix: never move a cursor backwards — previously an empty or
        # fully-failed run reset the cursors to 0, which re-emitted old
        # posts/comments as "new" on the next run.
        task_post_timestamps[task_name] = max(post_cursor, latest_post_timestamp)
        task_comment_timestamps[task_name] = max(comment_cursor, latest_comment_timestamp)

        if len(posts) == 0:
          logger.info(f"任务: {task_name} 没有获取到帖子，cookie 可能已过期，请重新登录")
        else:
          execution_time = time.time() - start_time
          logger.info(f"任务: {task_name} 完成，执行时间: {execution_time:.2f}秒，新帖子数: {new_posts_count} - 新回复数: {new_comments_count}")
      finally:
        # Always release browser resources, on success, early return and error
        # alike (the original leaked the context and duplicated this cleanup).
        for close in (page.close, context.close, browser.close):
          try:
            close()
          except Exception:
            pass

  except Exception as e:
    if "crashed" in str(e).lower():
      logger.error(f"任务 {task_name} 执行过程中页面崩溃，停止执行: {e}")
    else:
      logger.error(f"任务 {task_name} 执行过程中发生错误: {e}")
      
if __name__ == '__main__':
  try:
    init_scheduler()
    # Keep the main thread alive for the background scheduler. Fix: sleep
    # instead of `pass` — the bare busy-wait pinned a CPU core at 100%.
    while True:
      time.sleep(1)
  finally:
    # Shut the scheduler down cleanly on Ctrl-C or any fatal error.
    scheduler.shutdown()