import requests
import pandas as pd
import time
import os
from bs4 import BeautifulSoup


def crawl_sina_dividend_data():
    """Scrape pages 1-100 of Sina Finance's historical dividend listing.

    Fetches each page of the lsfh (历史分红) table, parses the data rows,
    and collects them as dicts keyed by the Chinese column headers.

    Returns:
        list[dict]: one dict per stock record; may be empty if every
        page request fails or returns no table.
    """
    all_data = []

    for page in range(1, 101):
        print(f"正在爬取第{page}页数据...")

        # Sina Finance historical-dividend listing endpoint, paginated via ?p=
        url = f"https://vip.stock.finance.sina.com.cn/q/go.php/vInvestConsult/kind/lsfh/index.phtml?p={page}"

        try:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
            }

            response = requests.get(url, headers=headers, timeout=10)
            response.encoding = 'gbk'  # Sina Finance serves GBK-encoded pages

            if response.status_code == 200:
                soup = BeautifulSoup(response.text, 'html.parser')

                # Locate the dividend data table on the page.
                table = soup.find('table', {'class': 'list_table'})

                # Count records actually harvested from THIS page only.
                # (Previously len(rows) was reported, which was undefined when
                # no table was found and also counted filtered-out rows.)
                page_count = 0

                if table:
                    rows = table.find_all('tr')[1:]  # skip the header row

                    for row in rows:
                        cols = row.find_all('td')
                        if len(cols) >= 8:
                            data = {
                                '代码': cols[0].get_text(strip=True),
                                '名称': cols[1].get_text(strip=True),
                                '上市日期': cols[2].get_text(strip=True),
                                '累计股息(%)': cols[3].get_text(strip=True),
                                '年均股息(%)': cols[4].get_text(strip=True),
                                '分红次数': cols[5].get_text(strip=True),
                                '融资总额(亿元)': cols[6].get_text(strip=True),
                                '详细': cols[7].get_text(strip=True),
                            }
                            all_data.append(data)
                            page_count += 1

                print(f"第{page}页爬取完成，获取到{page_count}条数据")
            else:
                print(f"第{page}页请求失败，状态码: {response.status_code}")

        except Exception as e:
            # Best-effort crawl: log the failure and continue with the next page.
            print(f"第{page}页爬取出错: {str(e)}")

        # Throttle to avoid hammering the server.
        time.sleep(1)

    return all_data


def save_to_excel(data, filename):
    """Save scraped records to an Excel workbook.

    Args:
        data: list of row dicts (one per stock record); when empty,
            nothing is written and a notice is printed.
        filename: output .xlsx path.

    Returns:
        None.
    """
    # Guard clause: nothing to write.
    if not data:
        print("没有数据可保存")
        return

    df = pd.DataFrame(data)
    df.to_excel(filename, index=False, engine='openpyxl')
    # Fixed: the message previously printed a literal placeholder instead of
    # interpolating the actual output filename.
    print(f"数据已保存到{filename}，共{len(data)}条记录")

def main():
    """Drive the full workflow: crawl all pages, then persist to Excel."""
    print("开始爬取新浪财经历史分红数据...")

    # Crawl every page and write whatever was collected to one workbook.
    records = crawl_sina_dividend_data()
    save_to_excel(records, "新浪财经历史分红数据.xlsx")

    print("数据爬取完成！")


# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()


