import requests
from bs4 import BeautifulSoup
import pandas as pd
import logging
import re
import os
import importlib

# Pre-flight check that the required third-party libraries are importable.
# NOTE(review): the `import requests` / `bs4` / `pandas` lines above will
# already have raised ImportError if those packages are missing, so in
# practice this mainly catches a missing `openpyxl` (needed by pandas for
# .xlsx output, and only imported lazily by pandas).
import importlib.util  # `import importlib` alone does not bind the `util` submodule
import sys

# Maps importable module name -> pip distribution name.
required_libraries = {
    'requests': 'requests',
    'bs4': 'beautifulsoup4',
    'pandas': 'pandas',
    'openpyxl': 'openpyxl'
}

missing_libraries = [
    package
    for module, package in required_libraries.items()
    if importlib.util.find_spec(module) is None
]

if missing_libraries:
    print("检测到缺少必要的库，请先安装：")
    # Build the install hint from the running interpreter instead of a
    # hard-coded, machine-specific pip path, so it works on any setup.
    print(f"请运行：{sys.executable} -m pip install {' '.join(missing_libraries)}")
    sys.exit(1)  # sys.exit instead of the site-injected exit() builtin

# Configure root logging once for the whole script.
logging.basicConfig(level=logging.INFO, format='%(asctime)s-%(levelname)s-%(message)s')


def crawl_sina_dividend(stock_code: str = "600519", timeout: float = 10.0) -> pd.DataFrame:
    """
    Scrape historical dividend records for one stock from Sina Finance.

    Side effects: writes the raw page to ``debug_page.html`` and the parsed
    table to ``新浪财经历史分红数据.xlsx`` in the working directory.

    :param stock_code: A-share stock code, default "600519" (Kweichow Moutai).
    :param timeout: per-request timeout in seconds (backward-compatible
        addition; previously the request could block indefinitely).
    :return: DataFrame with the dividend table, or an empty DataFrame on failure.
    """
    # Sina Finance share-bonus page for the given stock.
    url = f"https://vip.stock.finance.sina.com.cn/corp/go.php/vISSUE_ShareBonus/stockid/{stock_code}.phtml"

    try:
        # Browser-like headers so the request is not rejected as a bot.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
            "Connection": "keep-alive"
        }

        # BUGFIX: a missing timeout would let the call hang forever on a
        # stalled connection.
        response = requests.get(url, headers=headers, timeout=timeout)
        response.raise_for_status()  # turn HTTP errors into exceptions
        # The page declares gb2312, but pages labelled gb2312 frequently
        # contain characters only valid in GBK (a superset of gb2312), so
        # decode as GBK to avoid mojibake on rare characters.
        response.encoding = "gbk"

        # Save the raw page so the selectors below can be debugged offline.
        with open("debug_page.html", "w", encoding="utf-8") as f:
            f.write(response.text)
        logging.info("网页内容已保存到debug_page.html，可用于检查结构")

        soup = BeautifulSoup(response.text, 'html.parser')

        # Try several selectors because Sina has shipped the table under
        # different attributes over time; first hit wins.
        table_selectors = [
            'table[class="table_data"]',
            'table[id="sharebonus_1"]',
            'table[class="datatbl"]',
            'table[cellspacing="1"]',
            'table[bgcolor="#cccccc"]'
        ]

        table = None
        for selector in table_selectors:
            table = soup.select_one(selector)
            if table:
                logging.info(f"使用选择器 {selector} 找到了数据表格")
                break

        if not table:
            logging.error("未找到数据表格，可能网页结构已变化")
            return pd.DataFrame()

        # Collect every non-empty row (header cells and data cells alike).
        all_rows = []
        for row in table.find_all('tr'):
            cells = [cell.text.strip() for cell in row.find_all(['th', 'td'])]
            # Keep rows that have at least one non-empty cell.
            if cells and any(cells):
                all_rows.append(cells)

        if not all_rows:
            logging.error("未找到任何数据行")
            return pd.DataFrame()

        # Heuristic header detection: the widest row is taken as the header,
        # which sidesteps column-count mismatches from merged/spanning cells.
        max_length = max(len(row) for row in all_rows)
        header_row = next((row for row in all_rows if len(row) == max_length), all_rows[0])

        # Normalize every data row to exactly the header width.
        # NOTE(review): `row != header_row` also drops any data row whose
        # cell values happen to equal the header's — unlikely, but possible.
        data_rows = []
        for row in all_rows:
            if row != header_row:
                if len(row) < len(header_row):
                    # Pad short rows with empty strings.
                    row = row + [''] * (len(header_row) - len(row))
                elif len(row) > len(header_row):
                    # Truncate over-long rows.
                    row = row[:len(header_row)]
                data_rows.append(row)

        if not data_rows:
            logging.warning("未找到有效数据记录")
            return pd.DataFrame()

        # Build the frame and persist it for the preprocessing step.
        df = pd.DataFrame(data_rows, columns=header_row)
        df.to_excel("新浪财经历史分红数据.xlsx", index=False)
        logging.info(f"成功爬取数据，保存至新浪财经历史分红数据.xlsx，共 {len(df)} 条记录")
        return df

    except requests.exceptions.RequestException as e:
        logging.error(f"爬取数据时发生请求错误：{e}")
        return pd.DataFrame()
    except Exception as e:
        logging.error(f"爬取数据时发生未知错误：{e}")
        return pd.DataFrame()


def preprocess_dividend_data(input_file: str = "新浪财经历史分红数据.xlsx",
                             output_file: str = "新浪财经历史分红数据_预处理.xlsx",
                             stock_code: str = "600519") -> None:
    """
    Clean the raw Sina Finance dividend data and save the result to Excel.

    Pipeline: optional auto-crawl when the input file is missing (asks the
    user via ``input()``), duplicate removal, date parsing, numeric
    extraction, NA dropping, and sorting by the first recognized date column.

    :param input_file: path of the raw data file.
    :param output_file: path where the cleaned data is written.
    :param stock_code: stock code used when data has to be crawled first.
    """
    try:
        df = None  # holds a freshly crawled frame, if any

        # If the raw file is absent, offer to crawl it on the spot.
        if not os.path.exists(input_file):
            logging.warning(f"未找到原始数据文件：{input_file}")
            choice = input("是否要自动爬取数据？(y/n): ").strip().lower()
            if choice != 'y':
                logging.info("用户选择不自动爬取数据，程序退出")
                return
            logging.info(f"开始自动爬取 {stock_code} 的分红数据...")
            df = crawl_sina_dividend(stock_code)
            if df.empty:
                logging.error("自动爬取数据失败，请检查网络或尝试其他股票代码")
                logging.info("建议尝试这些股票代码：600519(贵州茅台)、601318(中国平安)、600036(招商银行)")
                return

        # BUGFIX: the original discarded the freshly crawled frame and
        # unconditionally re-read `input_file`, which crashed whenever
        # `input_file` differed from the crawler's fixed save path. Reuse
        # the crawled frame directly and only read the file otherwise.
        if df is None:
            df = pd.read_excel(input_file)
        if df.empty:
            logging.error("原始数据为空，无法进行预处理")
            return

        logging.info(f"开始预处理，原始数据共 {len(df)} 条记录")

        df = df.drop_duplicates()

        # Date-column keywords, matched as substrings of real column names.
        # NOTE(review): '红放日' looks like a truncated '红利发放日' — confirm
        # against the actual Sina table headers before changing it.
        date_columns = ['公告日期', '股权登记日', '除权除息日', '派息日', '红放日', '日期']
        for col in date_columns:
            matching_cols = [c for c in df.columns if col in c]
            for match_col in matching_cols:
                # Unparseable dates become NaT, then those rows are dropped.
                df[match_col] = pd.to_datetime(df[match_col], errors='coerce')
                invalid_dates = df[match_col].isna().sum()
                if invalid_dates > 0:
                    df = df.dropna(subset=[match_col])
                    logging.info(f"移除了 {invalid_dates} 条 {match_col} 无效的记录")

        # Convert text columns that look numeric (amounts, ratios) to numbers.
        for col in df.columns:
            # Sample the first rows: object dtype + digit/%/. characters
            # suggests a numeric column stored as text.
            if df[col].dtype == 'object' and any(re.search(r'[\d%.]', str(val)) for val in df[col].head(10)):
                try:
                    # Strip everything except digits, '%' and '.'.
                    # NOTE(review): this also drops minus signs — negative
                    # values (if any occur) would lose their sign.
                    df[col] = df[col].apply(lambda x: re.sub(r'[^\d%.]', '', str(x)))
                    if '%' in ''.join(df[col].astype(str).head(10)):
                        # Percentage column: convert to a 0-1 fraction.
                        df[col] = df[col].str.replace('%', '').astype(float) / 100
                    else:
                        df[col] = pd.to_numeric(df[col], errors='coerce')
                except Exception as e:
                    logging.warning(f"转换列 {col} 为数值类型时出错: {e}")

        # Drop any rows that still contain missing values.
        before = len(df)
        df = df.dropna()
        after = len(df)
        if before > after:
            logging.info(f"移除了 {before - after} 条含有缺失值的记录")

        # Sort newest-first by the FIRST recognized date column.
        # BUGFIX: the original `break` only exited the inner loop, so the
        # frame ended up sorted by the LAST matching keyword instead.
        sort_col = next(
            (c for key in date_columns for c in df.columns if key in c),
            None
        )
        if sort_col is not None:
            df = df.sort_values(by=sort_col, ascending=False)

        # Persist the cleaned data.
        df.to_excel(output_file, index=False)
        logging.info(f"数据预处理完成，保存至 {output_file}，共 {len(df)} 条记录")

    except Exception as e:
        logging.error(f"数据预处理时发生错误：{e}")

if __name__ == "__main__":
    # Entry point: preprocess dividend data for Kweichow Moutai (600519).
    DEFAULT_STOCK_CODE = "600519"
    preprocess_dividend_data(stock_code=DEFAULT_STOCK_CODE)
