import os
import time
import logging
import requests
import schedule
from datetime import datetime
from bs4 import BeautifulSoup
import mysql.connector
from mysql.connector import Error
import json
import sys
import re

BASE_DIR = os.path.dirname(os.path.abspath(__file__))  # directory containing this script
LOGS_DIR = os.path.join(BASE_DIR, 'logs')
CONFIG_DIR = os.path.join(BASE_DIR, 'config')

# Make sure the log/config directories exist before anything writes to them
os.makedirs(LOGS_DIR, exist_ok=True)
os.makedirs(CONFIG_DIR, exist_ok=True)

# Crawler tuning parameters (HTTP behavior, schedule, per-topic link cap)
CONFIG = {
    'timeout': 10,                # per-request timeout, seconds
    'max_retries': 3,             # HTTP attempts per URL
    'retry_delay': 5,             # seconds between retries
    'max_related_items': 5,       # cap on related links collected per topic
    'schedule_times': ['09:39'],  # daily run times, 'HH:MM' local time
    'user_agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
}
# One log file per calendar day, e.g. logs/crawler_20240101.log
LOG_FILE = os.path.join(LOGS_DIR, f'crawler_{datetime.now().strftime("%Y%m%d")}.log')

# Logging: write to the daily file and echo to the console
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(LOG_FILE, encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Presence of this file (in the current working directory) requests shutdown
STOP_FLAG_FILE = 'stop_crawler.flag'

def should_stop():
    """Report whether the stop-flag file exists, i.e. shutdown was requested."""
    stop_requested = os.path.exists(STOP_FLAG_FILE)
    return stop_requested

def get_config_file_path():
    """Build the full path of the database config JSON file."""
    config_path = os.path.join(CONFIG_DIR, 'db_config.json')
    return config_path

def create_default_db_config():
    """Write a template db_config.json for the user to edit.

    Returns True when the template was written, False on any I/O error.
    """
    template = {
        "host": "localhost",
        "port": 3306,
        "user": "root",
        "password": "123456",
        "database": "hot_chain",
        "charset": "utf8mb4",
        "autocommit": True
    }

    target = get_config_file_path()
    try:
        with open(target, 'w', encoding='utf-8') as handle:
            json.dump(template, handle, indent=2, ensure_ascii=False)
        logger.info(f"已创建默认的配置文件: {target}")
        logger.info("请修改其中的数据库连接信息后重新运行程序")
        return True
    except Exception as exc:
        logger.error(f"创建默认配置文件失败: {exc}")
        return False

def load_db_config():
    """Read and validate the database connection settings.

    Returns the parsed config dict. Raises FileNotFoundError when the
    file is missing; JSON and validation errors are logged and re-raised.
    """
    config_path = get_config_file_path()
    logger.info(f"正在加载配置文件: {config_path}")

    if not os.path.exists(config_path):
        logger.error(f"数据库配置文件不存在: {config_path}")
        raise FileNotFoundError(f"配置文件不存在: {config_path}")

    try:
        with open(config_path, 'r', encoding='utf-8') as handle:
            settings = json.load(handle)

        # All of these keys are needed before attempting a connection.
        missing_fields = [key for key in ('host', 'user', 'password', 'database')
                          if key not in settings]
        if missing_fields:
            raise ValueError(f"配置文件缺少必要字段: {missing_fields}")

        if settings.get('password') == '123456':
            logger.warning("检测到使用默认密码，请确认这是正确的数据库密码")

        logger.info("数据库配置加载成功")
        return settings

    except json.JSONDecodeError as exc:
        logger.error(f"配置文件格式错误: {exc}")
        raise
    except Exception as exc:
        logger.error(f"加载数据库配置失败: {exc}")
        raise

def test_database_connection():
    """Attempt a single connection with the configured credentials.

    Returns True on success, False on any failure (details are logged).
    """
    try:
        settings = load_db_config()
        logger.info("正在测试数据库连接...")

        conn = mysql.connector.connect(**settings)
        if not conn.is_connected():
            logger.error("数据库连接测试失败")
            return False

        logger.info("数据库连接测试成功")
        conn.close()
        return True

    except Error as exc:
        logger.error(f"数据库连接失败: {exc}")
        logger.info("请检查数据库服务、连接信息和用户权限")
        return False
    except Exception as exc:
        logger.error(f"数据库连接测试异常: {exc}")
        return False

def check_database_tables():
    """Verify that every table the crawler needs exists in the database.

    Returns True when all of hs_topic, hs_related_links and
    crawler_config are present, False otherwise (details are logged).
    """
    required_tables = ['hs_topic', 'hs_related_links', 'crawler_config']

    # Initialize handles up front so the finally block can test them safely
    # (the original relied on the fragile "'name' in locals()" idiom).
    connection = None
    cursor = None
    try:
        db_config = load_db_config()
        connection = mysql.connector.connect(**db_config)
        cursor = connection.cursor()

        cursor.execute("SHOW TABLES")
        existing_tables = {row[0] for row in cursor.fetchall()}

        missing_tables = [table for table in required_tables if table not in existing_tables]

        if missing_tables:
            logger.error(f"数据库中缺少以下表: {missing_tables}")
            return False

        logger.info("数据库表检查通过")
        return True

    except Exception as e:
        logger.error(f"检查数据库表失败: {e}")
        return False
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None and connection.is_connected():
            connection.close()

class CrawlerConfigManager:
    """Loads CSS-selector and URL settings from the crawler_config table.

    Values are cached in memory at construction time; if the table cannot
    be read, a hard-coded set of Baidu hot-search defaults is used instead.
    """
    def __init__(self, db_manager):
        # db_manager: a DatabaseManager whose .connection is an open MySQL connection
        self.db = db_manager
        self.config_cache = {}
        self.load_config()

    def load_config(self):
        """Read all active rows from crawler_config into the cache.

        Falls back to use_default_config() on any database error.
        """
        try:
            query = """SELECT config_key, config_value, config_type 
                      FROM crawler_config 
                      WHERE is_active = 1 
                      ORDER BY config_key"""

            cursor = self.db.connection.cursor()
            cursor.execute(query)
            results = cursor.fetchall()
            cursor.close()

            self.config_cache = {}
            for config_key, config_value, config_type in results:
                value = self._parse_config_value(config_value, config_type)
                self.config_cache[config_key] = value

            logger.info(f"加载配置项: {len(self.config_cache)} 个")

        except Exception as e:
            logger.error(f"加载配置失败: {e}")
            self.use_default_config()

    def _parse_config_value(self, config_value, config_type):
        """Convert a raw config_value string according to config_type.

        'json' values are expected to be lists (scalars are wrapped in a
        one-element list); malformed JSON goes through a repair pass and
        finally a comma-split fallback. 'int' and 'bool' are converted;
        any other type is returned as the raw string.
        """
        if config_type == 'json':
            try:
                # First try to parse the JSON directly
                value = json.loads(config_value)
                # Callers expect a list of selectors/URLs
                if not isinstance(value, list):
                    value = [str(value)]
                return value
            except json.JSONDecodeError:
                logger.warning(f"JSON解析失败，尝试修复格式: {config_value}")
                try:
                    # Try to repair common JSON formatting problems
                    fixed_value = self._fix_json_format(config_value)
                    value = json.loads(fixed_value)
                    if not isinstance(value, list):
                        value = [str(value)]
                    return value
                except json.JSONDecodeError:
                    logger.warning(f"JSON修复失败，按逗号分割: {config_value}")
                    # Last resort: split the string on commas
                    return self._parse_comma_separated(config_value)
        elif config_type == 'int':
            try:
                return int(config_value)
            except ValueError:
                logger.warning(f"整数转换失败，使用原始值: {config_value}")
                return config_value
        elif config_type == 'bool':
            return config_value.lower() in ('true', '1', 'yes', 'on')
        else:
            return config_value

    def _fix_json_format(self, config_value):
        """Heuristically massage a malformed JSON list string.

        Wraps the value in square brackets when they are missing and
        tries to escape stray interior double quotes.
        """
        # Strip leading/trailing whitespace first
        config_value = config_value.strip()

        # Add a leading '[' if it is missing
        if not config_value.startswith('['):
            config_value = '[' + config_value

        # Add a trailing ']' if it is missing
        if not config_value.endswith(']'):
            config_value = config_value + ']'

        # Escape interior double quotes, e.g. "a"b"c" -> "a\"b\"c"
        # NOTE(review): heuristic — handles one run of four quotes per match;
        # presumably targets values like "baidu.com/link" fragments, verify
        # against real crawler_config data
        config_value = re.sub(r'"([^"]*)"([^"]*)"([^"]*)"', r'"\1\"\2\"\3"', config_value)

        return config_value

    def _parse_comma_separated(self, config_value):
        """Split a bracketed, comma-separated string into a list of strings.

        Prefers double-quoted segments when present (so commas inside
        quotes stay intact); otherwise splits on commas and strips
        surrounding quotes and whitespace.
        """
        # Drop the surrounding square brackets, if any
        config_value = config_value.strip('[]')

        # Prefer double-quoted segments
        parts = re.findall(r'"[^"]*"', config_value)

        if not parts:
            # No quoted segments found: plain comma split
            parts = [v.strip().strip('"\'') for v in config_value.split(',') if v.strip()]
        else:
            # Remove the surrounding double quotes
            parts = [part.strip('"') for part in parts]

        return parts

    def use_default_config(self):
        """Populate the cache with hard-coded Baidu hot-search selectors."""
        logger.info("使用默认爬虫配置")
        self.config_cache = {
            'main_url': 'https://top.baidu.com/board?tab=realtime',
            'main_selectors': ['div.category-wrap_iQLoo.horizontal_1eKyQ'],
            'title_selectors': ['div.c-single-text-ellipsis'],
            'heat_selectors': ['div.hot-index_1Bl1a'],
            'detail_link_selectors': ['a.look-more_3oNWC'],
            'related_link_selectors': [
                'div.render-item_GS8wb a.group-sub-title_1EfHl',
                'div.group-content_3jCZd a.tts-title',
                'div.c-row a[href*="baidu.com/link"]',
                'div.title-wrapper_6E6PV a.sc-link',
                'h3.t a.sc-link',
                'a[href*="www.baidu.com/link?url="]'
            ],
            'backup_selectors': ['div.result.c-container'],
            'excluded_urls': ['#', 'javascript:', 'void(0)']
        }

    def get_config(self, key, default=None):
        """Return the cached value for *key*, or *default* when absent."""
        return self.config_cache.get(key, default)

    def get_main_url(self):
        # URL of the hot-search board page to crawl
        return self.get_config('main_url', 'https://top.baidu.com/board?tab=realtime')

    def get_main_selectors(self):
        # Selectors for the per-topic entry containers on the board page
        return self.get_config('main_selectors', ['div.category-wrap_iQLoo.horizontal_1eKyQ'])

    def get_title_selectors(self):
        # Selectors for a topic's title text within an entry container
        return self.get_config('title_selectors', ['div.c-single-text-ellipsis'])

    def get_heat_selectors(self):
        # Selectors for a topic's numeric heat value within an entry container
        return self.get_config('heat_selectors', ['div.hot-index_1Bl1a'])

    def get_detail_link_selectors(self):
        # Selectors for the link to a topic's detail page
        return self.get_config('detail_link_selectors', ['a.look-more_3oNWC'])

    def get_related_link_selectors(self):
        # Selectors tried in order on the detail page to find related links
        return self.get_config('related_link_selectors', [
            'div.render-item_GS8wb a.group-sub-title_1EfHl',
            'div.group-content_3jCZd a.tts-title',
            'div.c-row a[href*="baidu.com/link"]',
            'div.title-wrapper_6E6PV a.sc-link',
            'h3.t a.sc-link',
            'a[href*="www.baidu.com/link?url="]'
        ])

    def get_backup_selectors(self):
        # Fallback containers whose 'mu' attribute may hold a related URL
        return self.get_config('backup_selectors', ['div.result.c-container'])

    def get_excluded_urls(self):
        # URL substrings that disqualify a candidate related link
        return self.get_config('excluded_urls', ['#', 'javascript:', 'void(0)'])

class DatabaseManager:
    """Thin wrapper around a single MySQL connection.

    Provides topic insert/update and related-link persistence for the
    hs_topic and hs_related_links tables.
    """
    def __init__(self):
        self.connection = None  # opened by connect()
        self.connect()

    def connect(self):
        """Open the MySQL connection described by db_config.json.

        Raises mysql.connector.Error when the connection fails.
        """
        try:
            db_config = load_db_config()
            self.connection = mysql.connector.connect(**db_config)
            logger.info('数据库连接成功')
        except Error as e:
            logger.error(f"数据库连接失败: {e}")
            raise

    def execute_query(self, query, params=None, fetch=False):
        """Execute one statement on the shared connection.

        Returns the first result row when fetch=True. For writes,
        returns the new row id for INSERT statements or True otherwise.
        Returns None on failure or when the stop flag is set.
        """
        if should_stop():
            return None

        cursor = None
        try:
            cursor = self.connection.cursor()
            cursor.execute(query, params or ())

            if fetch:
                return cursor.fetchone()

            self.connection.commit()
            return cursor.lastrowid if 'INSERT' in query.upper() else True
        except Error as e:
            logger.error(f"数据库操作失败: {e}")
            self.connection.rollback()
            return None
        finally:
            # Close the cursor on every path (the original leaked it on error)
            if cursor is not None:
                cursor.close()

    def check_topic_exists(self, title):
        """Return (exists, topic_id) for a topic created today with *title*."""
        query = "SELECT id FROM hs_topic WHERE title = %s AND DATE(created_time) = CURDATE()"
        result = self.execute_query(query, (title,), fetch=True)
        return (True, result[0]) if result else (False, None)

    def insert_topic(self, topic_data):
        """Insert a new topic or refresh the heat of today's existing one.

        topic_data must carry 'title', 'heat_value' and 'original_url'.
        Returns (topic_id, is_new); (None, False) on failure.
        """
        exists, topic_id = self.check_topic_exists(topic_data['title'])

        if exists:
            query = "UPDATE hs_topic SET heat_value = %s, updated_time = NOW() WHERE id = %s"
            success = self.execute_query(query, (topic_data['heat_value'], topic_id))
            logger.info(f"更新主题热度: {topic_data['title']}")
            return (topic_id, False) if success else (None, False)
        else:
            topic_code = self.generate_topic_code()
            query = """INSERT INTO hs_topic (topic_code, title, heat_value, original_url, created_by, updated_by) 
                       VALUES (%s, %s, %s, %s, 'system', 'system')"""
            params = (topic_code, topic_data['title'], topic_data['heat_value'], topic_data['original_url'])

            topic_id = self.execute_query(query, params)
            if topic_id:
                logger.info(f"新增主题: {topic_data['title']}")
                return topic_id, True
            return None, False

    def insert_related_links(self, topic_id, urls):
        """Store any of *urls* not already linked to *topic_id*.

        Always returns True; insert failures are logged and rolled back.
        """
        if not urls:
            return True

        # Fetch already-stored links so duplicates are skipped.
        # Close the cursor even if the SELECT fails (original leaked it).
        query = "SELECT related_url FROM hs_related_links WHERE topic_id = %s"
        cursor = self.connection.cursor()
        try:
            cursor.execute(query, (topic_id,))
            existing_urls = {row[0] for row in cursor.fetchall()}
        finally:
            cursor.close()

        # Keep only links not yet stored for this topic
        new_urls = [url for url in urls if url not in existing_urls]

        if new_urls:
            query = "INSERT INTO hs_related_links (topic_id, related_url, created_by, updated_by) VALUES (%s, %s, 'system', 'system')"
            cursor = self.connection.cursor()
            try:
                cursor.executemany(query, [(topic_id, url) for url in new_urls])
                self.connection.commit()
                logger.info(f"插入 {len(new_urls)} 个相关链接")
            except Error as e:
                logger.error(f"插入相关链接失败: {e}")
                self.connection.rollback()
            finally:
                cursor.close()

        return True

    def generate_topic_code(self):
        """Generate the next sequential 'BDxxxx' topic code."""
        query = "SELECT MAX(topic_code) FROM hs_topic WHERE topic_code LIKE 'BD%'"
        result = self.execute_query(query, fetch=True)

        if not result or not result[0]:
            return 'BD0001'

        try:
            last_num = int(result[0][2:])
            return f'BD{str(last_num + 1).zfill(4)}'
        except ValueError:
            # Non-numeric suffix in the stored code: restart the sequence
            return 'BD0001'

    def close(self):
        """Close the connection if it is still open."""
        if self.connection and self.connection.is_connected():
            self.connection.close()
            logger.info('数据库连接已关闭')

def make_request(url, headers):
    """GET *url* with retries.

    Returns the Response on HTTP 200, or None when every attempt got a
    non-200 status. Network errors on the final attempt are re-raised;
    earlier ones are logged and retried. Raises when the stop flag is
    set between attempts.
    """
    last_attempt = CONFIG['max_retries'] - 1
    for attempt in range(CONFIG['max_retries']):
        if should_stop():
            raise Exception("检测到停止信号")

        try:
            response = requests.get(url, headers=headers, timeout=CONFIG['timeout'])
            if response.status_code == 200:
                return response
            logger.warning(f"请求失败，状态码: {response.status_code}, 重试 {attempt + 1}")
        except Exception as e:
            logger.error(f"请求错误: {e}, 重试 {attempt + 1}")
            if attempt == last_attempt:
                raise

        # Only wait between attempts — the original also slept after the
        # final failed attempt, delaying the caller for no reason.
        if attempt < last_attempt:
            time.sleep(CONFIG['retry_delay'])

    return None

def get_related_links(detail_url, headers, config_manager):
    """Fetch *detail_url* and harvest related-article URLs.

    Tries each configured CSS selector in order and stops at the first
    one that yields any links, collecting at most
    CONFIG['max_related_items'] of them. When nothing matched, falls
    back to the backup selectors' 'mu' attribute. Returns a (possibly
    empty) list of unique URLs; all failures are logged.
    """
    if should_stop():
        return []

    try:
        response = make_request(detail_url, headers)
        if not response:
            return []

        soup = BeautifulSoup(response.text, 'html.parser')
        excluded = config_manager.get_excluded_urls()
        cap = CONFIG['max_related_items']

        collected = set()
        for css in config_manager.get_related_link_selectors():
            try:
                for anchor in soup.select(css):
                    if 'href' not in anchor.attrs:
                        continue
                    href = anchor['href']
                    if any(bad in href for bad in excluded):
                        continue
                    collected.add(href)
                    if len(collected) >= cap:
                        break
            except Exception as err:
                logger.warning(f"选择器解析失败 [{css}]: {err}")
                continue
            # First selector that produced anything wins
            if collected:
                break

        # Fallback: read the 'mu' attribute off generic result containers
        if not collected:
            for css in config_manager.get_backup_selectors():
                try:
                    for node in soup.select(css)[:cap]:
                        mu = node.get('mu', '')
                        if mu and not mu.startswith('http://fakeurl'):
                            collected.add(mu)
                except Exception as err:
                    logger.warning(f"备用选择器解析失败 [{css}]: {err}")

        logger.info(f"获取到 {len(collected)} 个相关链接")
        return list(collected)

    except Exception as err:
        logger.error(f"获取相关链接失败: {err}")
        return []

def extract_with_selectors(soup, selectors, get_text=True):
    """Return the first match for any selector in *selectors*.

    Gives back the stripped text of the first matching element, or the
    element itself when get_text is False. Selector errors are logged
    and the next selector is tried. Returns None when nothing matches.
    """
    for css in selectors:
        try:
            found = soup.select(css)
            if found:
                first = found[0]
                return first.text.strip() if get_text else first
        except Exception as err:
            logger.warning(f"选择器 [{css}] 解析失败: {err}")
    return None

def scrape_baidu_hot_searches():
    """Crawl the Baidu hot-search board once and persist the results.

    For each entry found: insert the topic (or update today's existing
    row's heat value), and for newly-created topics also fetch and store
    related links. Honors the stop flag between entries; all failures
    are logged and the database connection is always closed.
    """
    if should_stop():
        logger.info("检测到停止信号，取消任务")
        return

    logger.info("开始爬取百度热搜...")

    try:
        db = DatabaseManager()
        config_manager = CrawlerConfigManager(db)

        url = config_manager.get_main_url()
        headers = {'User-Agent': CONFIG['user_agent']}

        response = make_request(url, headers)
        if not response:
            logger.error("无法获取热搜数据")
            return

        soup = BeautifulSoup(response.text, 'html.parser')

        # Locate the hot-search entry containers; first selector that matches wins
        main_selectors = config_manager.get_main_selectors()
        hot_searches = []
        for selector in main_selectors:
            try:
                hot_searches = soup.select(selector)
                if hot_searches:
                    break
            except Exception as e:
                logger.warning(f"主选择器 [{selector}] 解析失败: {e}")
                continue

        if not hot_searches:
            logger.warning("未找到热搜数据")
            return

        processed_count = 0
        for hot_search in hot_searches:
            # Check the stop flag between entries so shutdown is prompt
            if should_stop():
                logger.info(f"停止信号，已处理 {processed_count} 条")
                break

            try:
                title = extract_with_selectors(hot_search, config_manager.get_title_selectors())
                if not title:
                    continue

                # Non-numeric or missing heat text falls back to 0
                heat_text = extract_with_selectors(hot_search, config_manager.get_heat_selectors())
                heat_value = int(heat_text) if heat_text and heat_text.isdigit() else 0

                detail_link_elem = extract_with_selectors(hot_search, config_manager.get_detail_link_selectors(), get_text=False)
                original_url = detail_link_elem.get('href', '') if detail_link_elem else ""

                topic_data = {
                    'title': title,
                    'heat_value': heat_value,
                    'original_url': original_url
                }

                logger.info(f"处理: {title} (热度: {heat_value})")
                topic_id, is_new = db.insert_topic(topic_data)

                # Related links are only fetched for newly-created topics
                if topic_id and is_new and original_url:
                    related_links = get_related_links(original_url, headers, config_manager)
                    if related_links:
                        db.insert_related_links(topic_id, related_links)
                    time.sleep(1)  # be polite between detail-page requests

                processed_count += 1

            except Exception as e:
                logger.error(f"处理热搜条目失败: {e}")
                continue

        logger.info(f"爬取完成，共处理 {processed_count} 条")

    except Exception as e:
        logger.error(f"爬取失败: {e}")
    finally:
        if 'db' in locals():
            db.close()

def run_scheduler():
    """Register the daily crawl jobs and loop until stopped.

    Wakes once a minute to run pending jobs; exits on the stop flag,
    Ctrl+C, or an unexpected scheduler error.
    """
    schedule.clear()

    for run_at in CONFIG['schedule_times']:
        schedule.every().day.at(run_at).do(scrape_baidu_hot_searches)
        logger.info(f"设置定时任务: 每天 {run_at}")

    logger.info("调度器启动")

    try:
        while True:
            if should_stop():
                logger.info("检测到停止信号，程序退出")
                break
            schedule.run_pending()
            time.sleep(60)
    except KeyboardInterrupt:
        logger.info("程序被手动停止")
    except Exception as err:
        logger.error(f"调度器错误: {err}")

def run_immediate():
    """Clear any stale stop flag and run a single crawl right away."""
    logger.info("立即执行模式")
    flag = STOP_FLAG_FILE
    if os.path.exists(flag):
        os.remove(flag)
    scrape_baidu_hot_searches()

def main():
    """Entry point: validate config and database, then run once or on schedule."""
    try:
        # First run: hand the user a config template and bail out
        config_path = get_config_file_path()
        if not os.path.exists(config_path):
            logger.error(f"配置文件不存在: {config_path}")
            if create_default_db_config():
                logger.info("请修改配置文件中的数据库连接信息后重新运行程序")
            else:
                logger.error("创建默认配置文件失败")
            return

        # Fail fast when the database is unreachable or incomplete
        if not test_database_connection() or not check_database_tables():
            logger.error("数据库检查失败，程序退出")
            return

        if len(sys.argv) > 1 and sys.argv[1] == "--immediate":
            run_immediate()
            return

        # Scheduled mode: clear any stale stop flag before starting
        if os.path.exists(STOP_FLAG_FILE):
            os.remove(STOP_FLAG_FILE)

        banner = "=" * 50
        logger.info(banner)
        logger.info("百度热搜爬虫启动")
        logger.info(f"定时: {CONFIG['schedule_times']}")
        logger.info("使用 Ctrl+C 或创建 'stop_crawler.flag' 文件停止")
        logger.info(banner)

        run_scheduler()
    except Exception as err:
        logger.error(f"程序启动失败: {err}")
        sys.exit(1)

if __name__ == "__main__":
    # Script entry point: only runs when executed directly, not on import
    main()