import requests
import pandas as pd
from bs4 import BeautifulSoup
import time
import random


def crawl_sina_dividend_data(pages=100):
    """Crawl historical dividend data from Sina Finance.

    Parameters:
        pages (int): number of list pages to fetch (default 100).

    Returns:
        list: rows of cell text. The first element is the column-header
        row (when one was found on page 1); the rest are data rows.
        Pages that fail are skipped, so the result may be partial.
    """

    # Accumulates the header row plus all data rows across pages.
    all_data = []

    # Browser-like request headers so the site does not reject us as a bot.
    # NOTE: named request_headers (not headers) on purpose — the original
    # code shadowed this dict with the parsed column-header list on page 1,
    # silently breaking the headers sent for every page after the first.
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }

    print("开始爬取新浪财经历史分红数据...")

    for page in range(1, pages + 1):
        try:
            # Build the URL for the current list page.
            url = f'https://vip.stock.finance.sina.com.cn/q/go.php/vInvestConsult/kind/lsfh/index.phtml?p={page}'

            response = requests.get(url, headers=request_headers, timeout=10)
            # Sina Finance serves this page in a GBK-family encoding;
            # 'gbk' is a superset of gb2312 and decodes more characters safely.
            response.encoding = 'gbk'

            if response.status_code == 200:
                soup = BeautifulSoup(response.text, 'html.parser')

                # Locate the dividend data table.
                table = soup.find('table', {'class': 'list_table'})

                if table:
                    # Extract the column headers only once, from the first page.
                    if page == 1 and not all_data:
                        header_row = table.find('tr')
                        if header_row:
                            column_names = [th.get_text(strip=True) for th in header_row.find_all('th')]
                            if column_names:
                                all_data.append(column_names)

                    # Data rows follow the header row.
                    rows = table.find_all('tr')[1:]

                    for row in rows:
                        cells = row.find_all('td')
                        if cells:
                            all_data.append([cell.get_text(strip=True) for cell in cells])

                    print(f"第{page}页数据爬取成功，共{len(rows)}行数据")
                else:
                    print(f"第{page}页未找到数据表格")
            else:
                print(f"第{page}页请求失败，状态码：{response.status_code}")

            # Random delay between requests to avoid hammering the server.
            time.sleep(random.uniform(1, 3))

        except Exception as e:
            # Best-effort crawl: log the failure and move on to the next page.
            print(f"第{page}页爬取失败，错误信息：{str(e)}")
            continue

    return all_data


def save_to_file(data, filename="新浪财经历史分红数据"):
    """Save scraped rows to CSV (with TXT fallback) and to Excel.

    Parameters:
        data (list): rows to save; data[0] is the header row, the rest
            are data rows.
        filename (str): output file name (without extension).
    """

    if not data:
        print("没有数据可保存")
        return

    # BUG FIX: the filename parameter was previously ignored and a
    # hard-coded name was used for every output file.
    csv_filename = f"{filename}.csv"
    try:
        df = pd.DataFrame(data[1:], columns=data[0])
        # utf-8-sig writes a BOM so Excel opens the CSV with correct encoding.
        df.to_csv(csv_filename, index=False, encoding='utf-8-sig')
        print(f"数据已保存为CSV文件：{csv_filename}")
    except Exception as e:
        print(f"保存CSV文件失败：{str(e)}")

        # Fallback: write a plain tab-separated text file instead.
        try:
            txt_filename = f"{filename}.txt"
            with open(txt_filename, 'w', encoding='utf-8') as f:
                for row in data:
                    f.write('\t'.join(row) + '\n')
            print(f"数据已保存为文本文件：{txt_filename}")
        except Exception as e2:
            print(f"保存文本文件也失败：{str(e2)}")

    # Also save an Excel copy (requires an engine such as openpyxl;
    # failure here is non-fatal since the CSV/TXT copy already exists).
    try:
        excel_filename = f"{filename}.xlsx"
        df = pd.DataFrame(data[1:], columns=data[0])
        df.to_excel(excel_filename, index=False)
        print(f"数据已保存为Excel文件：{excel_filename}")
    except Exception as e:
        print(f"保存Excel文件失败：{str(e)}")


def main():
    """Entry point: crawl the dividend data, preview it, and save to disk."""
    print("新浪财经历史分红数据爬虫启动")
    print("=" * 50)

    # Fetch all pages of dividend data.
    dividend_data = crawl_sina_dividend_data(pages=100)

    if dividend_data:
        # len - 1 excludes the header row, so this count is data rows only.
        # (The original message wrongly claimed the header was included.)
        print(f"\n爬取完成！共获取{len(dividend_data) - 1}行数据（不含表头）")

        # Preview the first few rows (header plus up to two data rows).
        print("\n数据预览：")
        for i in range(min(3, len(dividend_data))):
            print(dividend_data[i])

        # Persist to CSV/Excel (with TXT fallback).
        save_to_file(dividend_data, "新浪财经历史分红数据")

        print("\n数据保存完成！")
    else:
        print("未能获取到任何数据")


if __name__ == "__main__":
    # Run the crawler only when executed as a script, not when imported.
    main()