#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
新浪财经历史分红数据爬取工具
该工具用于爬取新浪财经网站历史分红数据第1~100页的内容，并将其存储为本地Excel文件
"""

import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random
from tqdm import tqdm

class SinaDividendCrawler:
    """新浪财经历史分红数据爬虫类"""
    
    def __init__(self):
        """初始化爬虫"""
        self.base_url = "https://vip.stock.finance.sina.com.cn/q/go.php/vInvestConsult/kind/lsfh/index.phtml?p="
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }
        self.all_data = []
    
    def crawl_page(self, page_num):
        """
        爬取指定页码的数据
        
        Args:
            page_num (int): 页码
            
        Returns:
            list: 该页面的数据列表
        """
        try:
            url = self.base_url + str(page_num)
            response = requests.get(url, headers=self.headers, timeout=10)
            response.encoding = 'gb2312'  # 新浪财经使用gb2312编码
            
            soup = BeautifulSoup(response.text, 'html.parser')
            table = soup.find('table', {'class': 'list_table'})
            
            if not table:
                print(f"第{page_num}页未找到数据表")
                return []
            
            rows = table.find_all('tr')[1:]  # 跳过表头
            page_data = []
            
            for row in rows:
                cols = row.find_all('td')
                if len(cols) >= 9:  # 确保有足够的列
                    code = cols[0].text.strip()
                    name = cols[1].text.strip()
                    listing_date = cols[2].text.strip()
                    total_dividend = cols[3].text.strip()
                    annual_dividend = cols[4].text.strip()
                    dividend_times = cols[5].text.strip()
                    financing_total = cols[6].text.strip()
                    financing_times = cols[7].text.strip()
                    detail = cols[8].find('a')['href'] if cols[8].find('a') else ''
                    
                    page_data.append({
                        '代码': code,
                        '名称': name,
                        '上市日期': listing_date,
                        '累计股息(%)': total_dividend,
                        '年均股息(%)': annual_dividend,
                        '分红次数': dividend_times,
                        '融资总额(亿)': financing_total,
                        '融资次数': financing_times,
                        '详细': detail
                    })
            
            return page_data
            
        except Exception as e:
            print(f"爬取第{page_num}页时出错: {str(e)}")
            return []
    
    def crawl_all_pages(self, start_page=1, end_page=100):
        """
        爬取指定范围的所有页面数据
        
        Args:
            start_page (int): 起始页码
            end_page (int): 结束页码
        """
        print(f"开始爬取新浪财经历史分红数据 (第{start_page}~{end_page}页)")
        
        for page in tqdm(range(start_page, end_page + 1)):
            page_data = self.crawl_page(page)
            if page_data:
                self.all_data.extend(page_data)
                
            # 添加随机延迟，避免被封IP
            time.sleep(random.uniform(1, 3))
        
        print(f"爬取完成，共获取{len(self.all_data)}条数据")
    
    def save_to_excel(self, file_path="新浪财经历史分红数据.xlsx"):
        """
        将爬取的数据保存为Excel文件
        
        Args:
            file_path (str): 保存的文件路径
        """
        if not self.all_data:
            print("没有数据可保存")
            return
        
        # 确保文件保存在chapter3目录下
        import os
        file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_path)
        
        try:
            df = pd.DataFrame(self.all_data)
            df.to_excel(file_path, index=False, engine='openpyxl')
            print(f"数据已成功保存到: {file_path}")
        except Exception as e:
            print(f"保存文件时出错: {str(e)}")
    
    def run(self):
        """运行爬虫的主方法"""
        self.crawl_all_pages(1, 100)  # 恢复为爬取1-100页
        self.save_to_excel()

if __name__ == "__main__":
    # 检查是否已安装必要的依赖包
    try:
        import requests
        import pandas
        import beautifulsoup4
        import tqdm
        import openpyxl
    except ImportError:
        print("检测到缺少必要的依赖包，建议先安装：")
        print("pip install requests beautifulsoup4 pandas tqdm openpyxl")
    
    # 创建爬虫实例并运行
    crawler = SinaDividendCrawler()
    crawler.run()