import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random
from tqdm import tqdm

# Browser-like User-Agent so Sina Finance serves the normal page
# instead of blocking the request as an obvious bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}


def crawl_page(page_num):
    """Scrape one listing page of Sina Finance historical dividend data.

    Args:
        page_num: 1-based page index of the listing.

    Returns:
        A list of data rows, each a list of stripped cell strings.
        Returns an empty list on any failure (HTTP error, missing table,
        parse error); the raw HTML is dumped to ``page_<n>.html`` when no
        data table can be located, for offline debugging.
    """
    url = f"https://vip.stock.finance.sina.com.cn/q/go.php/vInvestConsult/kind/lsfh/index.phtml?p={page_num}"
    try:
        print(f"正在爬取第{page_num}页: {url}")
        response = requests.get(url, headers=headers, timeout=10)
        # Fail fast on HTTP errors (4xx/5xx) instead of trying to parse an
        # error page as data; caught below and reported like other failures.
        response.raise_for_status()
        # Sina Finance serves a legacy Chinese encoding. GBK is a strict
        # superset of gb2312, so it decodes everything gb2312 would and
        # avoids mojibake for characters outside the gb2312 set.
        response.encoding = 'gbk'

        # Debug output: status code and the start of the page body.
        print(f"响应状态码: {response.status_code}")
        print(f"页面前500字符: {response.text[:500]}...")

        soup = BeautifulSoup(response.text, 'html.parser')

        # Preferred: the table carrying class="table".
        table = soup.find('table', class_='table')
        if not table:
            # Fallback: scan every table and pick the first one whose text
            # contains the expected header keywords (代码 / 名称).
            tables = soup.find_all('table')
            print(f"找到{len(tables)}个表格元素")

            for i, tbl in enumerate(tables):
                table_text = tbl.text
                if '代码' in table_text and '名称' in table_text:
                    table = tbl
                    print(f"在第{i}个表格中找到匹配的数据")
                    break

        if not table:
            print(f"第{page_num}页未找到数据表")
            # Dump the raw HTML so the markup can be inspected offline.
            with open(f"page_{page_num}.html", "w", encoding='utf-8') as f:
                f.write(response.text)
            return []

        # Take every <tr> directly; the page does not reliably use
        # thead/tbody, so header rows are filtered out below instead.
        rows = table.find_all('tr')
        page_data = []

        for row in rows:
            cols = row.find_all(['td', 'th'])
            row_data = [col.text.strip() for col in cols]

            # Keep only real data rows: enough columns, non-empty first
            # cell, and not the header row (which starts with '代码').
            if len(row_data) > 5 and row_data[0] and not row_data[0].startswith('代码'):
                page_data.append(row_data)

        print(f"第{page_num}页成功爬取{len(page_data)}条数据")
        return page_data
    except Exception as e:
        # Broad catch is deliberate: one bad page must not abort the whole
        # 100-page crawl. Report and continue with an empty result.
        print(f"爬取第{page_num}页时出错: {e}")
        import traceback
        traceback.print_exc()
        return []


def main():
    """Crawl pages 1-100 of the dividend listing and save them to Excel.

    Side effects: network requests via ``crawl_page``, progress output to
    stdout, and an ``.xlsx`` file written to the working directory.
    """
    all_data = []
    # Column labels for the output table. Named `columns` (not `headers`)
    # so it does not shadow the module-level HTTP request headers dict.
    columns = ['代码', '名称', '上市日期', '累计股息(%)', '年均股息(%)', '分红次数', '融资总额(亿)', '融资次数', '详细']

    print("开始爬取新浪财经历史分红数据...")
    for page in tqdm(range(1, 101)):
        all_data.extend(crawl_page(page))

        # Random delay between pages to reduce the chance of triggering
        # anti-scraping rate limits.
        time.sleep(random.uniform(1, 3))

    # crawl_page only guarantees rows have more than 5 cells; pad short
    # rows and truncate long ones so the DataFrame constructor cannot
    # raise a column-count mismatch and abort after a long crawl.
    width = len(columns)
    normalized = [(row + [''] * width)[:width] for row in all_data]
    df = pd.DataFrame(normalized, columns=columns)

    # Save to Excel.
    output_file = "新浪财经历史分红数据.xlsx"
    df.to_excel(output_file, index=False, engine='openpyxl')

    print(f"\n爬取完成！共获取{len(df)}条数据")
    print(f"数据已保存至: {output_file}")
    print(f"数据形状: {df.shape}")


# Run the crawl only when executed as a script, not when imported.
if __name__ == "__main__":
    main()