import requests
import pandas as pd
import time
import random
from bs4 import BeautifulSoup
import re


def _parse_dividend_table(html, page):
    """
    Parse the dividend table out of one page of HTML.

    Args:
        html: decoded HTML text of a single listing page.
        page: 1-based page number, used only for log messages.

    Returns:
        pandas.DataFrame with one row per dividend record, or an empty
        DataFrame when no recognizable table / rows are found.
    """
    soup = BeautifulSoup(html, 'html.parser')

    # Primary selector; Sina marks the data table with class "list_table".
    table = soup.find('table', {'class': 'list_table'})
    if not table:
        # Fallback: scan all tables for one whose text contains the
        # expected column captions (代码/名称 = code/name).
        for candidate in soup.find_all('table'):
            text = candidate.get_text()
            if '代码' in text and '名称' in text:
                table = candidate
                break
    if not table:
        return pd.DataFrame()

    # Extract the header row; prefer a row explicitly styled as a title/head
    # row, otherwise fall back to the first row of the table.
    header_row = table.find('tr', class_=re.compile('title|head'))
    if not header_row:
        header_row = table.find('tr')

    column_names = []
    if header_row:
        column_names = [cell.get_text().strip()
                        for cell in header_row.find_all(['th', 'td'])]
        print(f"第{page}页实际表头: {column_names}, 列数: {len(column_names)}")

    # If the scraped header is empty or implausibly short, substitute a
    # default header (one spare column appended as a safety margin).
    if len(column_names) < 5:
        column_names = ['代码', '名称', '上市日期', '累计股息(%)', '年均股息(%)',
                        '分红次数', '融资总额(亿元)', '详细', '未知列']

    data = []
    for row in table.find_all('tr')[1:]:  # skip the header row
        # Skip blank rows and repeated title rows embedded in the body.
        if not row.get_text().strip() or 'tr_title' in row.get('class', []):
            continue

        cells = row.find_all(['td', 'th'])
        if not cells:
            continue
        row_data = [cell.get_text().strip() for cell in cells]

        # Require at least a few populated cells, then pad/truncate the
        # row so its width matches the header exactly.
        if len(row_data) > 3:
            if len(row_data) > len(column_names):
                row_data = row_data[:len(column_names)]
            elif len(row_data) < len(column_names):
                row_data.extend([''] * (len(column_names) - len(row_data)))
            data.append(row_data)

    if data:
        return pd.DataFrame(data, columns=column_names)
    return pd.DataFrame()


def get_dividend_data(page):
    """
    Fetch and parse one page of historical dividend data from Sina Finance.

    Args:
        page: 1-based page number of the listing to fetch.

    Returns:
        pandas.DataFrame of the page's records; empty DataFrame on any
        HTTP failure, non-200 status, or unparseable page.
    """
    # Browser-like request headers; without a User-Agent the site may
    # reject or throttle the request.
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Accept-Encoding': 'gzip, deflate, br',
    }

    url = f"https://vip.stock.finance.sina.com.cn/q/go.php/vInvestConsult/kind/lsfh/index.phtml?p={page}"

    try:
        response = requests.get(url, headers=request_headers, timeout=10)
        # The page is served in GBK; requests would otherwise guess wrong.
        response.encoding = 'gbk'

        if response.status_code == 200:
            return _parse_dividend_table(response.text, page)
        return pd.DataFrame()

    except Exception as e:
        # Best-effort scraping: report and return empty rather than abort
        # the whole multi-page crawl.
        print(f"第{page}页请求失败: {str(e)}")
        return pd.DataFrame()


def crawl_all_pages(start_page=1, end_page=100):
    """
    Crawl a range of dividend-data pages and combine them into one DataFrame.

    Args:
        start_page: first page to fetch (inclusive, default 1).
        end_page: last page to fetch (inclusive, default 100).

    Returns:
        pandas.DataFrame with all rows concatenated and normalized column
        names, or an empty DataFrame when no page yielded data.
    """
    all_data = []

    for page in range(start_page, end_page + 1):
        print(f"正在爬取第{page}页...")

        df = get_dividend_data(page)
        if not df.empty:
            all_data.append(df)
            print(f"第{page}页成功获取{len(df)}条数据")
        else:
            print(f"第{page}页未获取到数据")

        # Random delay between requests to avoid hammering the server.
        time.sleep(random.uniform(1, 2))

    if not all_data:
        return pd.DataFrame()

    # NOTE: concat aligns on column *names*; pages whose scraped headers
    # differ will contribute extra (NaN-filled) columns to the union.
    final_df = pd.concat(all_data, ignore_index=True)

    # Rename columns based on how many columns actually came back.
    col_count = len(final_df.columns)
    print(f"数据总列数: {col_count}")

    if col_count == 8:
        final_df.columns = ['代码', '名称', '上市日期', '累计股息(%)', '年均股息(%)',
                            '分红次数', '融资总额(亿元)', '详细']
    elif col_count == 9:
        final_df.columns = ['代码', '名称', '上市日期', '累计股息(%)', '年均股息(%)',
                            '分红次数', '融资总额(亿元)', '详细', '备注']
    elif col_count == 10:
        final_df.columns = ['代码', '名称', '上市日期', '累计股息(%)', '年均股息(%)',
                            '分红次数', '融资总额(亿元)', '详细', '备注1', '备注2']
    else:
        # Any other width: use the known base names and pad with generic
        # placeholder names for the extras.
        base_names = ['代码', '名称', '上市日期', '累计股息(%)', '年均股息(%)',
                      '分红次数', '融资总额(亿元)', '详细']
        all_names = base_names + [f'额外列{i}' for i in range(col_count - len(base_names))]
        final_df.columns = all_names[:col_count]

    print(f"设置的列名: {final_df.columns.tolist()}")
    return final_df


def main():
    """
    Entry point: crawl all pages, save the result to an Excel file, and
    print a short summary of the collected data.
    """
    print("开始爬取新浪财经历史分红数据(1-100页)...")

    # Crawl the data.
    df = crawl_all_pages()

    if not df.empty:
        # Save to Excel.
        filename = "新浪财经历史分红数据.xlsx"
        df.to_excel(filename, index=False, engine='openpyxl')

        print(f"\n爬取完成! 共获取{len(df)}条数据")
        # Fixed: previously printed a literal placeholder instead of the
        # actual output filename.
        print(f"数据已保存到: {filename}")

        # Show basic information about the result.
        print(f"\n数据形状: {df.shape}")
        print("\n前5行数据:")
        print(df.head())
        print("\n列名和数据类型:")
        print(df.dtypes)
    else:
        print("未能获取到任何数据")


# Run the crawler only when executed as a script (not when imported).
if __name__ == "__main__":
    main()