#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
新浪财经历史分红数据爬取脚本
功能：爬取新浪财经网站历史分红数据第1~100页的内容，并将其存储为本地文件
"""

import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import logging

# 配置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class SinaDividendCrawler:
    """Crawler for historical dividend data on Sina Finance.

    Fetches the paginated listing table (pages ``start_page``..``end_page``),
    parses each row into a dict, accumulates rows in ``self.all_data``, and
    can export the result to an Excel file.
    """

    # Output dict keys, in the same order as table columns 1..10
    # (column 0 is a row-number cell and is skipped).
    _FIELD_NAMES = (
        "代码", "名称", "最新价(元)", "涨跌幅(%)", "上市日期",
        "累计股息(%)", "年均股息(%)", "分红次数", "融资总额(亿元)", "详细",
    )

    def __init__(self):
        # Listing endpoint; the page number is passed via the "p" query param.
        self.base_url = "https://vip.stock.finance.sina.com.cn/q/go.php/vInvestConsult/kind/dxfh/index.phtml"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }
        # One Session reuses the TCP connection across the ~100 page requests
        # instead of opening a new one per requests.get() call.
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.all_data = []

    def fetch_page(self, page_num):
        """Fetch the decoded HTML of one listing page.

        Args:
            page_num: 1-based page index.

        Returns:
            The page HTML as str, or ``None`` on any network/HTTP failure.
        """
        try:
            params = {
                "p": page_num
            }
            response = self.session.get(self.base_url, params=params, timeout=10)
            # Sina serves these pages in GBK; 'gb2312' is a proper subset of
            # GBK and garbles less-common characters, so decode as 'gbk'.
            response.encoding = 'gbk'

            if response.status_code == 200:
                return response.text
            logger.error("获取页面 %s 失败，状态码: %s", page_num, response.status_code)
            return None
        except Exception as e:
            logger.error("获取页面 %s 时发生异常: %s", page_num, e)
            return None

    def parse_page(self, html):
        """Parse one page of HTML and extract the dividend rows.

        Args:
            html: Raw page HTML from :meth:`fetch_page`.

        Returns:
            A list of dicts (one per stock row, keys = ``_FIELD_NAMES``);
            an empty list when the table is missing or parsing fails.
        """
        try:
            soup = BeautifulSoup(html, 'html.parser')
            table = soup.find('table', class_='list_table')

            if not table:
                logger.warning("未找到数据表格")
                return []

            page_data = []
            for row in table.find_all('tr')[1:]:  # skip the header row
                cols = row.find_all('td')
                # Cells 1..10 are read below, so the row must have at least
                # 11 cells. (The original ">= 10" check let a 10-cell row
                # raise IndexError on cols[10], which the broad except then
                # turned into silently dropping the ENTIRE page.)
                if len(cols) < 11:
                    continue
                values = (col.text.strip() for col in cols[1:11])
                page_data.append(dict(zip(self._FIELD_NAMES, values)))

            return page_data
        except Exception as e:
            logger.error("解析页面时发生异常: %s", e)
            return []

    def crawl_all_pages(self, start_page=1, end_page=100):
        """Crawl pages ``start_page``..``end_page`` (inclusive).

        Failed pages get one retry after a 5 s pause, then are skipped.
        Parsed rows are appended to ``self.all_data``.
        """
        logger.info("开始爬取新浪财经历史分红数据，从第%s页到第%s页", start_page, end_page)

        for page_num in range(start_page, end_page + 1):
            logger.info("正在爬取第 %s/%s 页", page_num, end_page)

            html = self.fetch_page(page_num)
            if not html:
                # One retry after a longer back-off before giving up on the page.
                logger.info("重试第 %s 页", page_num)
                time.sleep(5)
                html = self.fetch_page(page_num)
                if not html:
                    continue

            self.all_data.extend(self.parse_page(html))

            # Throttle requests to be polite to the server.
            time.sleep(2)

        logger.info("爬取完成，共获取到 %s 条数据", len(self.all_data))

    def save_to_excel(self, file_path="新浪财经历史分红数据.xlsx"):
        """Save the accumulated rows to an Excel file.

        Args:
            file_path: Destination ``.xlsx`` path.

        Returns:
            ``True`` on success, ``False`` when there is no data or the
            write fails.
        """
        try:
            if not self.all_data:
                logger.warning("没有数据可保存")
                return False

            df = pd.DataFrame(self.all_data)
            df.to_excel(file_path, index=False)
            logger.info("数据已成功保存到 %s", file_path)
            return True
        except Exception as e:
            logger.error("保存数据时发生异常: %s", e)
            return False


def main():
    """Crawl pages 1-100 of the Sina Finance dividend listing and export to Excel."""
    crawler = SinaDividendCrawler()

    try:
        # Crawl pages 1-100.
        crawler.crawl_all_pages(start_page=1, end_page=100)

        # Export whatever was collected; warn when nothing came back.
        if crawler.all_data:
            crawler.save_to_excel()
        else:
            logger.warning("未能获取到任何数据")

    except KeyboardInterrupt:
        logger.info("程序被用户中断")
    except Exception as e:
        # Top-level boundary: log and exit cleanly rather than traceback-crash.
        logger.error("程序运行时发生异常: %s", e)


if __name__ == "__main__":
    # NOTE: the original placed a bare string literal ("""主函数""") inside this
    # guard — a no-op expression, not a docstring. Wrapping the logic in main()
    # gives it a real docstring and a conventional entry point.
    main()