import time
import random
import csv
import os
import traceback
import logging
from datetime import datetime
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

logger = logging.getLogger(__name__)


class DataParser:
    """Parses master-trade records out of crawled HTML and writes them to CSV."""

    @staticmethod
    def parse_trades_from_html(html_content):
        """Parse trade records from the master-operations page HTML.

        Args:
            html_content: Raw HTML string of the page.

        Returns:
            list[dict]: One dict per trade with keys MasterName, ExpertType,
            DailyReturn, TotalReturn, TradeType, StockCode, StockName,
            TradePrice (float), TradeVolume, TradeDate (datetime),
            Source, IsCopied. Empty list when nothing could be parsed.
        """
        logger.info("开始解析HTML结构...")

        soup = BeautifulSoup(html_content, 'html.parser')
        trades = []

        # Each <ul> under div.data represents one master's record.
        ul_list = soup.select('div.data ul')
        if not ul_list:
            logger.warning("未找到交易记录容器")
            # Dump the HTML so a selector mismatch can be debugged offline.
            with open("parse_error.html", "w", encoding="utf-8") as f:
                f.write(html_content)
            logger.info("已保存解析失败的HTML到 parse_error.html")
            return trades

        logger.info(f"找到 {len(ul_list)} 个高手记录")

        # Positional mapping of <li> cells to output field names.
        field_names = (
            "MasterName", "ExpertType", "DailyReturn", "TotalReturn",
            "TradeType", "StockCode", "StockName", "TradePrice",
            "TradeVolume", "TradeDate",
        )

        for ul in ul_list:
            # Each <li> is one field of the record.
            li_items = ul.select('li')
            if not li_items:
                logger.warning("跳过记录，未找到字段")
                continue

            # Require the 10 positional fields (trade details excluded).
            if len(li_items) < 10:
                logger.warning(f"跳过记录，字段不足: {len(li_items)}")
                continue

            try:
                # The length guard above guarantees indices 0-9 exist, so the
                # cells map positionally without per-index fallbacks (the old
                # `if len(li_items) > k else default` branches were dead code).
                trade = {
                    name: li.get_text(strip=True)
                    for name, li in zip(field_names, li_items)
                }

                # Strip embedded whitespace noise from every field.
                for key in trade:
                    trade[key] = trade[key].replace('\n', '').replace('\t', '').strip()

                # Price: fall back to 0.0 on malformed text. Narrowed from a
                # bare `except:` which also swallowed KeyboardInterrupt.
                try:
                    trade['TradePrice'] = float(trade['TradePrice'])
                except (ValueError, TypeError):
                    trade['TradePrice'] = 0.0

                # The site omits the year, so prepend the current one.
                # NOTE(review): records crawled around New Year may get the
                # wrong year (e.g. "12-31" seen in January) — confirm upstream.
                try:
                    current_year = datetime.now().year

                    # Date plus time, e.g. "08-26 14:18".
                    if ' ' in trade['TradeDate']:
                        date_parts = trade['TradeDate'].split(' ')
                        date_str = f"{current_year}-{date_parts[0]} {date_parts[1]}"
                        trade['TradeDate'] = datetime.strptime(date_str, '%Y-%m-%d %H:%M')
                    # Date only, e.g. "08-26".
                    else:
                        date_str = f"{current_year}-{trade['TradeDate']}"
                        trade['TradeDate'] = datetime.strptime(date_str, '%Y-%m-%d')
                except Exception as e:
                    logger.warning(f"日期处理失败: {trade['TradeDate']}, 使用当前时间: {e}")
                    trade['TradeDate'] = datetime.now()

                # Constant provenance fields.
                trade['Source'] = '东方财富'
                trade['IsCopied'] = 0

                # Discard rows whose key identifying fields are empty.
                key_fields = ['MasterName', 'StockCode', 'StockName', 'TradeType', 'TradeDate']
                if not all(trade.get(field) for field in key_fields):
                    logger.warning(f"跳过无效记录: {trade}")
                    continue

                trades.append(trade)
            except Exception as e:
                logger.error(f"解析记录失败: {e}")
                # Log a snippet of the container to aid debugging.
                logger.debug(f"容器内容: {ul.prettify()[:200]}...")
                continue

        logger.info(f"成功解析 {len(trades)} 条交易记录")
        return trades

    @staticmethod
    def save_to_csv(trades, filename_prefix="高手交易记录"):
        """Write trade dicts to a timestamped CSV under ./output.

        Args:
            trades: List of trade dicts; column order follows the first
                record's key order, and all records are assumed to share keys.
            filename_prefix: Base name for the generated file.

        Returns:
            str | None: Path of the written file, or None when *trades* is empty.
        """
        if not trades:
            logger.warning("没有交易记录可保存")
            return None

        # Column order follows the first record (the `if trades` fallback was
        # dead after the early return above).
        headers = list(trades[0].keys())

        output_dir = "output"
        os.makedirs(output_dir, exist_ok=True)

        # Timestamped name avoids clobbering earlier exports.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"{filename_prefix}_{timestamp}.csv"
        filepath = os.path.join(output_dir, filename)

        # utf-8-sig writes a BOM so Excel detects the encoding correctly.
        with open(filepath, "w", newline="", encoding="utf-8-sig") as f:
            writer = csv.DictWriter(f, fieldnames=headers)
            writer.writeheader()
            writer.writerows(trades)

        logger.info(f"交易记录已保存到: {filepath}")
        return filepath


class WebCrawler:
    """Selenium-based crawler for the Eastmoney master-operations page.

    Drives a WebDriver through the listing pages, hands raw HTML to
    DataParser, and notifies an optional monitor when unseen trades appear.
    """

    def __init__(self, driver, db_manager):
        self.driver = driver            # Selenium WebDriver instance
        self.db_manager = db_manager    # persistence layer: master lookup / dedupe
        self.last_check_time = None
        self.page_count = 1             # 1-based index of the page being crawled
        self.max_pages = 20             # page cap — not enforced in this class; TODO confirm caller uses it
        self.has_more_pages = True
        self.is_initialized = False
        self.monitor = None             # attached later via set_monitor()

    def set_monitor(self, monitor):
        """Attach the monitor that is notified of pages containing new trades."""
        self.monitor = monitor
        logger.info("已设置监控器实例到爬虫")

    def initialize_crawler(self, master_name=None):
        """Open the master-operations page and optionally search for one master.

        Args:
            master_name: When given, typed into the search box and submitted.

        Returns:
            bool: True on success, False if any step raised.
        """
        try:
            url = "https://group.eastmoney.com/MasterOpera.html"

            # Cap page/script load time so a hung page cannot block the crawl.
            self.driver.set_page_load_timeout(15)
            self.driver.set_script_timeout(15)

            self.driver.get(url)
            logger.info(f"已访问: {url}")

            # Small random delay to look less like a bot.
            time.sleep(random.uniform(0.5, 1.5))

            # Wait until the document itself reports it finished loading.
            WebDriverWait(self.driver, 15).until(
                lambda d: d.execute_script("return document.readyState") == "complete"
            )
            logger.info("页面基本加载完成")

            if master_name:
                # Wait for the search box; a timeout is only logged because the
                # find_element below will still surface a hard failure.
                try:
                    WebDriverWait(self.driver, 15).until(
                        EC.element_to_be_clickable((By.CSS_SELECTOR, "input.search-input"))
                    )
                    logger.info("搜索框已加载")
                except Exception:  # narrowed from bare except
                    logger.warning("等待搜索框加载超时")

                # Type the name character by character to mimic human input.
                search_input = self.driver.find_element(By.CSS_SELECTOR, "input.search-input")
                search_input.clear()
                for char in master_name:
                    search_input.send_keys(char)
                    time.sleep(0.1)
                logger.info(f"已输入高手名称: {master_name}")

                search_button = self.driver.find_element(By.CSS_SELECTOR, "button.search-btn")
                search_button.click()
                logger.info("已点击搜索按钮")

                # Best-effort wait for any of the known result containers.
                try:
                    WebDriverWait(self.driver, 15).until(
                        EC.presence_of_element_located((By.CSS_SELECTOR, ".master-list, .operate-list, .trade-list"))
                    )
                    logger.info("搜索结果已加载")
                except Exception:  # narrowed from bare except
                    logger.warning("等待搜索结果加载超时")

            # Reset pagination state for a fresh crawl.
            self.is_initialized = True
            self.page_count = 1
            self.has_more_pages = True
            logger.info("爬虫初始化完成")
            return True
        except Exception as e:
            logger.error(f"爬虫初始化失败: {str(e)}")
            return False

    def fetch_next_page(self):
        """Crawl the current page, detect unseen trades, then advance one page.

        Returns:
            list[dict]: Trades parsed from the current page; [] when not
            initialized, pagination is exhausted, or an error occurred.
        """
        if not self.is_initialized:
            logger.error("爬虫未初始化，无法抓取数据")
            return []

        if not self.has_more_pages:
            logger.info("没有更多页面可抓取")
            return []

        logger.info(f"开始抓取第 {self.page_count} 页")

        try:
            page_source = self.driver.page_source

            # Keep a copy of each page's HTML for offline debugging.
            debug_file = f"debug_page_source_{self.page_count}.html"
            with open(debug_file, "w", encoding="utf-8") as f:
                f.write(page_source)
            logger.info(f"已保存页面源码: {debug_file}")

            trades = DataParser.parse_trades_from_html(page_source)
            logger.info(f"本页解析到 {len(trades)} 条交易记录")

            # Scan for at least one trade the DB has not seen; stop at the
            # first hit — the monitor re-processes the whole page anyway.
            new_trades_found = False
            for trade in trades:
                if 'MasterName' not in trade:
                    logger.warning(f"交易记录缺少MasterName字段: {trade}")
                    continue

                master_id = self.db_manager.get_master_id_by_name(trade['MasterName'])

                if not master_id:
                    # Unknown master implies a new record.
                    new_trades_found = True
                    break
                else:
                    trade['MasterID'] = master_id

                # Dedupe needs a TradeDate — skip malformed records.
                if 'TradeDate' not in trade:
                    logger.warning(f"交易记录缺少TradeDate字段: {trade}")
                    continue

                if not self.db_manager.is_duplicate_trade(master_id, trade):
                    new_trades_found = True
                    break

            # Hand the whole page to the monitor only when something is new.
            if new_trades_found and self.monitor:
                self.monitor.process_page(self.page_count, trades)
                logger.info("本页有新交易记录")
            else:
                logger.info("本页所有交易记录都已存在")

            # Advance regardless of whether this page had anything new.
            self.try_next_page()

            return trades
        except Exception as e:
            logger.error(f"抓取第 {self.page_count} 页失败: {str(e)}")
            return []

    def try_next_page(self):
        """Click the next-page control if present and still enabled.

        Returns:
            bool: True if navigation was triggered; False when the last page
            is reached, no control exists, or clicking failed. Sets
            ``has_more_pages`` to False on every terminal condition.
        """
        try:
            # Several selectors because the site varies its pagination markup.
            next_buttons = self.driver.find_elements(By.CSS_SELECTOR,
                                                     "li.next > a, .next-page > a, .pagination-next > a")
            if not next_buttons:
                logger.info("找不到下一页按钮")
                self.has_more_pages = False
                return False

            next_button = next_buttons[0]

            # get_attribute returns None when the anchor has no class
            # attribute; the original expression then raised TypeError and
            # aborted pagination instead of clicking a valid link.
            if "disabled" in (next_button.get_attribute("class") or ""):
                logger.info("已到达最后一页")
                self.has_more_pages = False
                return False

            next_button.click()
            logger.info("已点击下一页按钮")

            # Cascade of waits: result container, then pagination control,
            # then a fixed sleep as a last resort.
            try:
                WebDriverWait(self.driver, 10).until(
                    EC.presence_of_element_located((By.CSS_SELECTOR, ".master-list, .operate-list, .trade-list"))
                )
                logger.info("新页面已加载")
            except Exception:  # narrowed from bare except
                try:
                    WebDriverWait(self.driver, 6).until(
                        EC.presence_of_element_located((By.CSS_SELECTOR, "li.next, .next-page, .pagination-next"))
                    )
                    logger.info("分页控件已加载")
                except Exception:  # narrowed from bare except
                    time.sleep(2)
                    logger.info("简单等待2秒后继续")

            # Jitter between pages to avoid a robotic request cadence.
            time.sleep(random.uniform(1, 2))

            self.page_count += 1
            return True
        except Exception as e:
            logger.error(f"翻页失败: {str(e)}")
            self.has_more_pages = False
            return False

    def fetch_trades(self, master_name=None):
        """Single-page entry point: (re)initialize if needed, then fetch one page.

        Args:
            master_name: Optional master name to search for during (re)init.

        Returns:
            list[dict]: Trades from the current page; [] when no driver is set.
        """
        if not self.driver:
            logger.error("WebDriver未初始化，无法抓取数据")
            return []

        # Re-initialize when never started or when the previous run exhausted
        # the pagination.
        if not self.is_initialized or not self.has_more_pages:
            self.initialize_crawler(master_name)

        return self.fetch_next_page()

    def fetch_all_trades(self):
        """Compatibility wrapper: single-page mode fetches one page per call."""
        return self.fetch_trades()