import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from tqdm import tqdm
import time
import logging

# Configure module-level logging: timestamped INFO-and-above messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Browser-like User-Agent so the server does not reject scripted requests.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}

def crawl_sina_dividend_data(start_page=1, end_page=100):
    """
    Crawl historical dividend data pages from the Sina Finance website.

    Parameters:
        start_page: first page number to fetch (inclusive)
        end_page: last page number to fetch (inclusive)

    Returns:
        data_list: list of dicts, one per stock row, keyed by the Chinese
        column names used in the output spreadsheet
    """
    data_list = []

    logger.info(f"开始爬取新浪财经历史分红数据，共{end_page-start_page+1}页")

    for page in tqdm(range(start_page, end_page+1)):
        url = f"https://vip.stock.finance.sina.com.cn/q/go.php/vInvestConsult/kind/lsfh/index.phtml?p={page}"

        try:
            response = requests.get(url, headers=headers, timeout=10)
            # Fail fast on HTTP errors (4xx/5xx) instead of silently
            # parsing an error page as if it were data.
            response.raise_for_status()
            # The page is served in a GB-family encoding; 'gbk' is a strict
            # superset of gb2312 and avoids mojibake for rarer characters.
            response.encoding = 'gbk'

            # Parse the HTML
            soup = BeautifulSoup(response.text, 'html.parser')

            # Locate the data table, trying progressively looser strategies.
            # Strategy 1: by id
            table = soup.find('table', {'id': 'dataTable'})
            # Strategy 2: by class
            if not table:
                table = soup.find('table', {'class': 'list_table'})
            # Strategy 3: scan every table for a '代码' (code) header cell
            if not table:
                for tbl in soup.find_all('table'):
                    if tbl.find('td', string='代码') or tbl.find('th', string='代码'):
                        table = tbl
                        break

            if not table:
                logger.warning(f"第{page}页未找到数据表格，跳过")
                continue

            # Table rows, skipping the header row.
            rows = table.find_all('tr')[1:]

            added = 0  # records actually appended (rows with <9 cells are skipped)
            for row in rows:
                cols = row.find_all('td')
                if len(cols) < 9:
                    continue
                data_list.append({
                    '代码': cols[0].text.strip(),
                    '名称': cols[1].text.strip(),
                    '上市日期': cols[2].text.strip(),
                    '累计股息(%)': cols[3].text.strip(),
                    '年均股息(%)': cols[4].text.strip(),
                    '分红次数': cols[5].text.strip(),
                    '融资总额(亿元)': cols[6].text.strip(),
                    '融资次数': cols[7].text.strip(),
                    '详细': cols[8].find('a')['href'] if cols[8].find('a') else '',
                })
                added += 1

            # Report records kept, not the raw row count: the original logged
            # len(rows), over-counting whenever malformed rows were skipped.
            logger.info(f"第{page}页爬取成功，新增{added}条数据")
            # Throttle between pages to avoid being rate-limited or banned.
            time.sleep(1)

        except Exception as e:
            logger.error(f"爬取第{page}页时出错: {str(e)}")
            # Back off longer after a failure before the next page.
            time.sleep(3)

    logger.info(f"爬取完成，共获取{len(data_list)}条数据")
    return data_list

def save_to_excel(data_list, output_file="新浪财经历史分红数据.xlsx"):
    """
    Persist crawled dividend records to an Excel workbook.

    Parameters:
        data_list: list of row dicts produced by the crawler
        output_file: destination .xlsx filename

    Returns:
        True on success; False when there is nothing to save or writing fails.
    """
    # Nothing to write — bail out early.
    if not data_list:
        logger.warning("没有数据可保存")
        return False

    try:
        # Build a tabular frame from the row dicts and write it out.
        frame = pd.DataFrame(data_list)
        frame.to_excel(output_file, index=False, engine='openpyxl')
        logger.info(f"数据已成功保存到 {output_file}，共{len(frame)}条记录")
        return True
    except Exception as e:
        logger.error(f"保存数据时出错: {str(e)}")
        return False

def main():
    """
    Entry point: crawl every page, then persist the results to Excel.
    """
    records = crawl_sina_dividend_data(start_page=1, end_page=100)

    # Guard clause: warn and stop if the crawl yielded nothing.
    if not records:
        logger.warning("未能获取到任何数据")
        return

    save_to_excel(records)

# Run the crawler only when executed as a script, not on import.
if __name__ == "__main__":
    main()