# weibo_new.py
# 此为使用webdriver的爬虫版本，总是能扫码登录，更方便。

import os
import re
import json
import time
import random
import logging
import requests
import sqlite3
import traceback
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException

from core.parser import WeiboParser
from core.models import User
from crawler.webdriver import initialize_webdriver
from tools.time_tool import handle_time
from core.storage import UserStorage, WeiboStorage

# 从基类文件导入 WeiboBaseCrawler
from weibo_base import WeiboBaseCrawler

# Module-wide logging: timestamped INFO-level messages (e.g. "2024-01-01 12:00:00 - INFO - ...").
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class WeiboWebDriverCrawler(WeiboBaseCrawler):
    """Selenium-WebDriver-based Weibo crawler.

    Drives a real browser session so QR-code login is always available.
    Cookies are persisted to a JSON file and replayed on later runs;
    crawled posts are buffered in ``self.weibo_data`` (from the base
    class) and periodically flushed to a SQLite database.
    """

    def __init__(self, user_id, cookie, output_dir='./output', cookie_path=None, db_path=None):
        """
        :param user_id: Weibo user id whose profile and posts are crawled.
        :param cookie: raw cookie string forwarded to the base crawler.
        :param output_dir: directory for exported results, default ``./output``.
        :param cookie_path: JSON file used to load/save browser cookies,
            default ``data/cookie.json``.
        :param db_path: SQLite database file, default ``weibo.db``.
        """
        super().__init__(user_id, cookie, output_dir)

        # Unified WebDriver setup; pass is_no_image=True to disable image loading.
        self.driver = initialize_webdriver(is_no_image=False)
        self.cookie_path = cookie_path or 'data/cookie.json'

        # Implicit wait applied to every element lookup.
        self.driver.implicitly_wait(10)

        # Default timeout (seconds) for explicit WebDriverWait calls.
        self.wait_time = 15

        # Mirror the browser's User-Agent for any out-of-band HTTP requests.
        self.user_agent = self.driver.execute_script("return navigator.userAgent;")

        # The fallback makes db_path always truthy, so the previous
        # `if self.db_path:` guard was dead code and has been removed.
        self.db_path = db_path or 'weibo.db'
        self.create_tables(self.db_path)

    def __del__(self):
        """Best-effort browser shutdown; must never raise during GC/interpreter exit."""
        driver = getattr(self, 'driver', None)
        if driver is not None:
            try:
                driver.quit()
            except Exception:
                # quit() can fail if the browser already died or the
                # interpreter is tearing down; either way there is
                # nothing useful to do in a destructor.
                pass

    # ------------------------------------------------------------------
    # Login & cookies
    # ------------------------------------------------------------------

    def load_cookies_from_file(self, cookie_file=None):
        """Load cookies from a JSON file into the current browser session.

        :param cookie_file: cookie file path; defaults to ``self.cookie_path``.
        :return: True on success, False when the file is missing or loading fails.
        """
        cookie_file = cookie_file or self.cookie_path
        if not os.path.exists(cookie_file):
            logging.warning(f"Cookie 文件不存在：{cookie_file}")
            return False

        try:
            with open(cookie_file, 'r', encoding='utf-8') as f:
                cookies = json.load(f)

            # The target domain must already be open before add_cookie works.
            self.driver.get('https://weibo.cn/')

            for cookie in cookies:
                # Drop fields that Selenium's add_cookie may reject.
                cookie.pop('sameSite', None)
                cookie.pop('httpOnly', None)
                self.driver.add_cookie(cookie)

            logging.info(f"Cookies 已从 {cookie_file} 加载")
            return True
        except Exception as e:
            logging.error(f"加载 cookies 失败: {e}")
            return False

    def save_cookies_to_file(self, cookie_file=None):
        """Save the current session's cookies to a JSON file for reuse.

        :param cookie_file: destination path; falls back to ``self.cookie_path``,
            then to the default ``data/cookie.json``.
        :return: True on success, False on failure.
        """
        cookie_file = cookie_file or getattr(self, 'cookie_path', None)
        if not cookie_file:
            cookie_file = 'data/cookie.json'  # same default as __init__
            self.cookie_path = cookie_file

        try:
            cookies = self.driver.get_cookies()
            with open(cookie_file, 'w', encoding='utf-8') as f:
                json.dump(cookies, f)
            logging.info(f"Cookies 已保存至 {cookie_file}")
            return True
        except Exception as e:
            logging.error(f"保存 cookies 失败: {e}")
            return False

    def _login(self):
        """Log in to weibo.cn.

        Flow:
        1. If a cookie file exists, try cookie-based login first.
        2. Otherwise open the login page and wait for the user to scan
           the QR code manually.
        3. On success, persist the fresh cookies for next time.

        :return: True when logged in, False on failure or user abort.
        """

        # 1. Try the locally-stored cookies first.
        if self.cookie_path and os.path.exists(self.cookie_path):
            logging.info("尝试使用本地 Cookie 登录")
            if self.load_cookies_from_file():
                self.driver.get(f'https://weibo.cn/{self.user_id}/info')
                if self._check_login_status():
                    logging.info("本地 Cookie 登录成功")
                    return True
                else:
                    logging.warning("Cookie 已过期或无效")

        # 2. No valid cookie: fall back to interactive QR-code login.
        login_url = 'https://weibo.cn/login'
        logging.info("未找到有效 Cookie，跳转至扫码登录页面，请手动登录。")
        self.driver.get(login_url)

        # Block until the user confirms the scan (or quits).
        print("\n请用手机微博扫码登录，登录完成后按 Enter 键继续...")
        user_input = input("按 Enter 继续爬取，输入 q/Q/quit/exit 退出程序：").strip().lower()
        if user_input in ['q', 'quit', 'exit']:
            logging.info("用户主动退出程序")
            return False

        # Persist the fresh session cookies.
        self.save_cookies_to_file()

        # Navigate back to the target user's info page.
        self.driver.get(f'https://weibo.cn/{self.user_id}/info')

        # Verify the manual login actually succeeded.
        if not self._check_login_status():
            logging.error("登录失败，请检查是否完成登录操作")
            return False

        logging.info("扫码登录成功")
        return True

    def _check_login_status(self):
        """Return True when the logged-in toolbar element appears within 10s."""
        try:
            WebDriverWait(self.driver, 10).until(
                EC.presence_of_element_located((By.XPATH, '//div[@class="tm"]/a'))
            )
            return True
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; a wait timeout means "not logged in".
            return False

    def get_cookies(self):
        """Return the browser's cookies as a simple name -> value dict."""
        cookies = self.driver.get_cookies()
        cookie_dict = {cookie['name']: cookie['value'] for cookie in cookies}
        return cookie_dict

    def _make_request(self, url):
        """Navigate the browser to *url* and wait for the page to settle.

        :param url: absolute URL to load.
        :return: True when the page loaded, False on a 403 or any error.
        """
        try:
            self.driver.get(url)

            # Crude anti-ban check against the rendered page source.
            if '403' in self.driver.page_source and 'Forbidden' in self.driver.page_source:
                self.driver.back()
                return False

            # Wait until at least the <body> element exists.
            WebDriverWait(self.driver, self.wait_time).until(
                EC.presence_of_element_located((By.TAG_NAME, 'body'))
            )

            # Scroll to the bottom to trigger lazy loading, then pause
            # 1~2 seconds (comment previously claimed 2~4s, which was wrong).
            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(random.uniform(1, 2))

            return True

        except Exception as e:
            logging.error(f"请求失败: {e}")
            traceback.print_exc()
            return False

    def get_user_info(self):
        """Fetch and parse the target user's profile page into ``self.user_info``.

        :return: True on success, False when the request or parsing fails.
        """
        url = f'https://weibo.cn/{self.user_id}/info'
        if not self._make_request(url):
            logging.error(f"获取用户信息失败: {self.user_id}")
            return False

        try:
            html = self.driver.page_source
            user_data = WeiboParser.parse_user_info(html)
            self.user_info = User(**user_data)
            # The parsed page may not carry the id; trust the constructor arg.
            self.user_info.user_id = self.user_id

            logging.info(f"用户信息获取成功: {self.user_info.nickname}")
            return True
        except Exception as e:
            logging.error(f"解析用户信息失败: {e}")
            traceback.print_exc()
            return False

    # ------------------------------------------------------------------
    # Database initialisation
    # ------------------------------------------------------------------

    def _init_database(self):
        """Open the SQLite database, ensure tables, save user info, preload IDs."""
        with sqlite3.connect(self.db_path) as conn:
            # Make sure the schema exists before any reads/writes.
            self._create_tables(conn)

            # Persist the user profile once, if it was fetched.
            if hasattr(self, 'user_info'):
                user_storage = UserStorage(conn)
                user_storage.save_user(self.user_info)
                logging.info("用户信息已保存到数据库")

            # Preload recent weibo IDs for duplicate detection.
            self._load_existing_weibo_ids(conn)

    def _create_tables(self, conn):
        """Create the user and weibo tables via their storage helpers."""
        user_storage = UserStorage(conn)
        weibo_storage = WeiboStorage(conn)
        user_storage.create_table()
        weibo_storage.create_table()

    def _load_existing_weibo_ids(self, conn):
        """Load the 100 most recent stored weibo IDs into ``self.existing_ids``."""
        cursor = conn.cursor()
        cursor.execute('''
            SELECT id FROM weibo 
            WHERE user_id=? 
            ORDER BY publish_time DESC 
            LIMIT 100
        ''', (self.user_id,))
        self.existing_ids = {row[0] for row in cursor.fetchall()}

    # ------------------------------------------------------------------
    # Weibo crawling
    # ------------------------------------------------------------------

    def get_weibo_data(self, start_page=1, max_pages=None):
        """Crawl the user's weibo list page by page.

        Stops when a page fails to load, yields no new posts, or the
        page limit is reached. Buffered posts are flushed to SQLite
        every 10 pages and once more at the end.

        :param start_page: first page number to fetch (clamped to >= 1).
        :param max_pages: stop after this page number; None means no limit.
        :return: True (crawl loop completed, possibly early).
        """
        page = max(1, start_page)
        count = 0
        # Ensure schema, persist user info, and preload duplicate IDs.
        self._init_database()

        while True:
            if not self._load_weibo_page(page):
                break

            weibo_divs = self._get_weibo_elements()
            if not weibo_divs:
                break

            has_new = self._parse_and_store_weibos(weibo_divs)
            if not has_new:
                logging.info("本页无新增微博，停止爬取")
                break

            if max_pages and page >= max_pages:
                logging.info(f"已达到最大页码{page}页，已退出爬取")
                break

            if not self._handle_next_page(page, count):
                break

            page += 1
            count += 1

        # Flush whatever is still buffered.
        if self.weibo_data:
            self._save_remaining_weibos()

        return True

    def _load_weibo_page(self, page):
        """Load weibo list page *page* and wait for post containers to render.

        :return: True when at least one post container appeared, else False.
        """
        url = f'https://weibo.cn/{self.user_id}?page={page}'
        if not self._make_request(url):
            logging.error(f"获取第{page}页微博失败")
            return False

        try:
            WebDriverWait(self.driver, self.wait_time).until(
                EC.presence_of_element_located((By.XPATH, '//div[@class="c" and starts-with(@id, "M_")]'))
            )
            return True
        except Exception as e:
            logging.error(f"等待微博内容加载失败: {e}")
            return False

    def _get_weibo_elements(self):
        """Return the post <div> elements on the current page, or None on error."""
        try:
            return self.driver.find_elements(By.XPATH, '//div[@class="c" and starts-with(@id, "M_")]')
        except Exception as e:
            logging.error(f"获取微博元素失败: {e}")
            return None

    def _parse_and_store_weibos(self, weibo_divs):
        """Parse each post element and buffer new (unseen) posts.

        :param weibo_divs: WebElement list of post containers.
        :return: True when at least one new post was added to the buffer.
        """
        added = False
        for div in weibo_divs:
            weibo = WeiboParser.parse_weibo(div.get_attribute('innerHTML'))
            if weibo and weibo['id'] not in self.existing_ids:
                weibo['user_id'] = self.user_id
                self.weibo_data.append(weibo)
                added = True
        return added

    def _handle_next_page(self, page, count):
        """Check for a next-page link and pace the crawl.

        :param page: current page number (unused; kept for interface stability).
        :param count: pages crawled so far, used for the 10-page flush cadence.
        :return: True when a next page exists, False otherwise.
        """
        try:
            # find_element raises NoSuchElementException when the link is
            # absent, so execution past this line means a next page exists.
            # (The previous `if next_page:` check and its else-branch were
            # unreachable dead code and have been removed.)
            self.driver.find_element(By.XPATH, '//div[@id="pagelist"]//a[text()="下页"]')

            # Flush the buffer to SQLite every 10 pages.
            if count % 10 == 0:
                self._save_and_clear_cache(count)
            # Random delay to look less like a bot.
            time.sleep(random.uniform(1, 3))
            return True
        except NoSuchElementException:
            logging.info("未找到分页信息，已到达最后一页")
            return False
        except Exception as e:
            logging.error(f"处理下一页失败: {e}")
            return False

    def _save_and_clear_cache(self, count):
        """Flush buffered posts to SQLite and clear the in-memory buffer."""
        logging.info(f"已累计 {count} 页微博，正在保存到数据库...")
        self.save_to_sqlite(self.db_path)
        self.weibo_data.clear()

    def _save_remaining_weibos(self):
        """Flush the final batch of buffered posts to SQLite."""
        logging.info(f"正在保存最后一批共 {len(self.weibo_data)} 条微博数据")
        self.save_to_sqlite(self.db_path)

def main():
    """CLI entry point: parse options, log in, then crawl and export."""
    import argparse

    arg_parser = argparse.ArgumentParser(description='微博爬虫 - 爬取指定用户的微博内容')
    arg_parser.add_argument('-u', '--user_id', required=True, help='用户ID')
    arg_parser.add_argument('-c', '--cookie', help='微博Cookie字符串，用于登录状态')
    arg_parser.add_argument('-o', '--output_dir', default='./output', help='输出目录，默认为./output')
    arg_parser.add_argument('-p', '--max_pages', type=int, help='最大爬取页数，默认爬取所有页')
    arg_parser.add_argument('-f', '--cookie_file', help='Cookie txt文件路径，文件内直接粘贴Cookie字符串')
    arg_parser.add_argument('-s', '--cookie_save_path', help='保存 Cookie 的 JSON 文件路径')
    opts = arg_parser.parse_args()

    # Prefer the cookie given on the command line; otherwise fall back
    # to reading it from a plain-text cookie file, if one was supplied.
    cookie_value = opts.cookie
    if not cookie_value and opts.cookie_file and os.path.exists(opts.cookie_file):
        with open(opts.cookie_file, 'r', encoding='utf-8') as fh:
            cookie_value = fh.read().strip()

    # Build the WebDriver-backed crawler.
    crawler = WeiboWebDriverCrawler(
        opts.user_id,
        cookie=cookie_value,
        output_dir=opts.output_dir,
        cookie_path=opts.cookie_save_path,
    )

    # Abort early if login fails (cookie replay or manual QR scan).
    if not crawler._login():
        logging.error("登录失败")
        return

    # Without profile data there is nothing meaningful to crawl.
    if not crawler.get_user_info():
        return

    crawler.get_weibo_data(max_pages=opts.max_pages)
    crawler.save_results()


if __name__ == '__main__':
    main()
