#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
新浪财经历史分红数据爬取工具
该工具用于爬取新浪财经网站历史分红数据的内容（页码范围可配置，默认爬取第1~5页），并将其存储为本地Excel文件
"""

import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random

class SinaDividendCrawler:
    """Crawler for Sina Finance's historical-dividend listing (lsfh).

    Fetches the paginated HTML listing, parses each table row into a dict,
    accumulates the rows in ``self.all_data``, and can dump them to Excel.
    """

    def __init__(self):
        """Set up the base URL, browser-like request headers, and the result buffer."""
        # Paginated listing endpoint; the 1-based page number is appended after "p=".
        self.base_url = "https://vip.stock.finance.sina.com.cn/q/go.php/vInvestConsult/kind/lsfh/index.phtml?p="
        # Browser-like headers reduce the chance of being blocked by the site.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9"
        }
        # Accumulates one dict per parsed table row across all crawled pages.
        self.all_data = []
        print(f"初始化爬虫，使用基础URL: {self.base_url}")

    def crawl_page(self, page_num):
        """
        Fetch and parse a single listing page.

        Args:
            page_num (int): 1-based page number to fetch.

        Returns:
            list: Row dicts parsed from that page; empty list on any error
            (HTTP failure, missing table, network timeout).
        """
        try:
            url = self.base_url + str(page_num)
            print(f"\n正在爬取第{page_num}页: {url}")

            response = requests.get(url, headers=self.headers, timeout=15)
            print(f"响应状态码: {response.status_code}")
            # Fail fast on HTTP error statuses instead of parsing an error page
            # as if it were data; the except below turns this into an empty result.
            response.raise_for_status()

            # The site declares gb2312, but pages routinely contain characters
            # outside that charset; gbk is a strict superset and decodes both.
            response.encoding = 'gbk'
            content = response.text

            soup = BeautifulSoup(content, 'html.parser')

            # The dividend data lives in the single <table class="list_table">.
            table = soup.find('table', {'class': 'list_table'})
            if not table:
                print(f"警告: 第{page_num}页未找到数据表")
                return []

            rows = table.find_all('tr')[1:]  # skip the header row
            page_data = []
            for row in rows:
                record = self._parse_row(row.find_all('td'))
                if record is not None:
                    page_data.append(record)

            print(f"第{page_num}页爬取完成，获取{len(page_data)}条数据")
            return page_data

        except Exception as e:
            print(f"爬取第{page_num}页时出错: {str(e)}")
            return []

    @staticmethod
    def _parse_row(cols):
        """Convert one table row's <td> cells into a record dict.

        Args:
            cols (list): The row's cell elements (bs4 Tags); only ``.text``
                and ``.find('a')`` are used.

        Returns:
            dict | None: The parsed record, or None for malformed rows
            (fewer than 9 cells, or a cell-level parsing error).
        """
        if len(cols) < 9:  # not a data row (e.g. pagination / spacer rows)
            return None
        try:
            # Column 8 holds a link to the per-stock detail page, if present.
            link = cols[8].find('a')
            return {
                '代码': cols[0].text.strip(),
                '名称': cols[1].text.strip(),
                '上市日期': cols[2].text.strip(),
                '累计股息(%)': cols[3].text.strip(),
                '年均股息(%)': cols[4].text.strip(),
                '分红次数': cols[5].text.strip(),
                '融资总额(亿)': cols[6].text.strip(),
                '融资次数': cols[7].text.strip(),
                '详细': link['href'] if link else ''
            }
        except Exception as e:
            print(f"解析数据行时出错: {str(e)}")
            return None

    def crawl_all_pages(self, start_page=1, end_page=5):
        """
        Crawl an inclusive range of pages into ``self.all_data``.

        Args:
            start_page (int): First page number (inclusive).
            end_page (int): Last page number (inclusive).
        """
        print(f"\n开始爬取新浪财经历史分红数据 (第{start_page}~{end_page}页)")

        for page in range(start_page, end_page + 1):
            page_data = self.crawl_page(page)
            if page_data:
                self.all_data.extend(page_data)

            # Random delay between requests to avoid IP bans / rate limiting.
            delay = random.uniform(2, 5)
            print(f"等待{delay:.2f}秒后继续")
            time.sleep(delay)

        print(f"\n爬取完成，共获取{len(self.all_data)}条数据")

    def save_to_excel(self, file_path="新浪财经历史分红数据.xlsx"):
        """
        Save the accumulated data to an Excel file.

        Args:
            file_path (str): Destination path for the .xlsx file.
        """
        if not self.all_data:
            print("警告: 没有数据可保存")
            return

        try:
            df = pd.DataFrame(self.all_data)
            df.to_excel(file_path, index=False, engine='openpyxl')
            print(f"\n数据已成功保存到: {file_path}")
            print(f"数据形状: {df.shape}")
            print("数据前5行:")
            print(df.head())
        except Exception as e:
            print(f"保存文件时出错: {str(e)}")

    def run(self):
        """Crawl a small trial range of pages and save the results."""
        try:
            # Crawl the first 5 pages as a trial run.
            self.crawl_all_pages(1, 5)
            if self.all_data:
                self.save_to_excel()
            else:
                print("警告: 未获取到任何数据")
        except Exception as e:
            print(f"爬虫运行过程中发生错误: {str(e)}")

if __name__ == "__main__":
    # 检查是否已安装必要的依赖包
    try:
        import requests
        import pandas
        from bs4 import BeautifulSoup
        import openpyxl
    except ImportError:
        print("检测到缺少必要的依赖包，建议先安装：")
        print("pip install requests beautifulsoup4 pandas openpyxl")
        exit(1)
    
    # 打印运行信息
    print("==== 新浪财经历史分红数据爬虫 ====")
    
    # 创建爬虫实例并运行
    try:
        crawler = SinaDividendCrawler()
        crawler.run()
        print("\n爬虫运行结束")
    except KeyboardInterrupt:
        print("\n爬虫被用户中断")
    except Exception as e:
        print(f"\n爬虫运行出错: {str(e)}")