import requests
import pandas as pd
from bs4 import BeautifulSoup
import time
import re

def get_stock_pe_ratio_sina(stock_code):
    """
    Fetch the historical financial-guideline table (which includes P/E data)
    for one stock from Sina Finance and return it as a DataFrame.

    :param stock_code: 6-digit A-share stock code, e.g. '600519'
    :return: DataFrame with the parsed table, or None on any failure
    """
    url = f"https://money.finance.sina.com.cn/corp/go.php/vFD_FinancialGuideLine/stockid/{stock_code}/displaytype/4.phtml"

    # Renamed from `headers` so it is not shadowed by the table-header list
    # parsed further down.
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }

    try:
        print(f"正在获取股票 {stock_code} 的市盈率数据...")
        response = requests.get(url, headers=request_headers, timeout=15)
        response.encoding = 'gbk'  # Sina Finance serves GBK-encoded pages

        if response.status_code != 200:
            print(f"请求失败，状态码：{response.status_code}")
            return None

        soup = BeautifulSoup(response.text, 'html.parser')

        # Locate the primary data table; fall back to the generic list_table.
        table = soup.find('table', {'id': 'BalanceSheetNewTable0'})

        if not table:
            print("未找到市盈率数据表格")
            tables = soup.find_all('table', {'class': 'list_table'})
            if tables:
                table = tables[0]
                print("找到list_table表格")
            else:
                print("未找到任何数据表格")
                return None

        # Parse the header row (<th> cells of the first <tr>).
        column_names = []
        header_row = table.find('tr')
        if header_row:
            column_names = [th.get_text(strip=True) for th in header_row.find_all('th')]
            print(f"表头列数: {len(column_names)}")

        # Parse the data rows, skipping the header row.
        data = []
        rows = table.find_all('tr')[1:]

        for i, row in enumerate(rows):
            cols = row.find_all('td')
            if len(cols) > 1:
                row_data = [col.get_text(strip=True) for col in cols]
                print(f"第{i+1}行数据列数: {len(row_data)}")
                data.append(row_data)

        if not data:
            print("未提取到数据")
            return None

        # pandas requires every row to have the same width; pad short rows so
        # a ragged table cannot raise ValueError at DataFrame construction.
        width = max(len(row) for row in data)
        data = [row + [''] * (width - len(row)) for row in data]

        # If the header does not match the data width, synthesize column names.
        if len(column_names) != width:
            print(f"警告: 表头列数({len(column_names)})与数据列数({width})不匹配")
            column_names = [f'col_{i}' for i in range(width)]
            column_names[0] = '报告日期'  # first column is normally the report date

        df = pd.DataFrame(data, columns=column_names)

        # Basic cleaning: treat "--" as missing and drop all-empty rows.
        df = df.replace('--', pd.NA)
        df = df.dropna(how='all')

        print(f"成功获取 {len(df)} 条历史数据")
        return df

    except Exception as e:
        print(f"获取数据时出错：{e}")
        import traceback
        traceback.print_exc()
        return None

def debug_webpage_structure(stock_code):
    """
    Inspect the Sina Finance page for *stock_code*: list every table with its
    id/class, row count, header and first data row, then dump the prettified
    HTML to a local file for offline analysis.
    """
    url = f"https://money.finance.sina.com.cn/corp/go.php/vFD_FinancialGuideLine/stockid/{stock_code}/displaytype/4.phtml"

    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }

    try:
        resp = requests.get(url, headers=request_headers, timeout=15)
        resp.encoding = 'gbk'

        # Guard clause: bail out early on any non-OK response.
        if resp.status_code != 200:
            print(f"请求失败，状态码：{resp.status_code}")
            return

        soup = BeautifulSoup(resp.text, 'html.parser')

        all_tables = soup.find_all('table')
        print(f"找到 {len(all_tables)} 个表格")

        for idx, tbl in enumerate(all_tables):
            table_id = tbl.get('id', '无ID')
            table_class = tbl.get('class', '无class')
            print(f"表格 {idx+1}: ID={table_id}, Class={table_class}")

            tr_list = tbl.find_all('tr')
            print(f"  行数: {len(tr_list)}")

            if not tr_list:
                continue

            # Header cells of the first row.
            headers = [th.get_text(strip=True) for th in tr_list[0].find_all('th')]
            print(f"  表头: {headers}")

            # First data row, when present.
            if len(tr_list) > 1:
                first_data = [td.get_text(strip=True) for td in tr_list[1].find_all('td')]
                print(f"  第一行数据: {first_data}")

        # Persist the page so the structure can be studied offline.
        with open(f'debug_{stock_code}.html', 'w', encoding='utf-8') as out:
            out.write(soup.prettify())
        print(f"网页内容已保存到 debug_{stock_code}.html")

    except Exception as e:
        print(f"调试时出错：{e}")

def get_stock_pe_ratio_simple(stock_code):
    """
    Simplified P/E data fetch: instead of locating one specific table, collect
    every <tr> on the page that looks like a data row (4+ cells).

    :param stock_code: 6-digit A-share stock code, e.g. '600519'
    :return: DataFrame with auto-generated column names, or None on failure
    """
    url = f"https://money.finance.sina.com.cn/corp/go.php/vFD_FinancialGuideLine/stockid/{stock_code}/displaytype/4.phtml"

    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }

    try:
        print(f"正在获取股票 {stock_code} 的市盈率数据...")
        response = requests.get(url, headers=request_headers, timeout=15)
        response.encoding = 'gbk'  # Sina Finance serves GBK-encoded pages

        if response.status_code != 200:
            print(f"请求失败，状态码：{response.status_code}")
            return None

        soup = BeautifulSoup(response.text, 'html.parser')

        # Collect every plausible data row from the whole page.
        data = []
        rows = soup.find_all('tr')

        for row in rows:
            cols = row.find_all('td')
            if len(cols) > 3:  # treat rows with at least 4 cells as data rows
                row_data = [col.get_text(strip=True) for col in cols]
                data.append(row_data)

        if not data:
            print("未找到数据行")
            return None

        # Rows scraped from different tables may differ in width; pad them all
        # to the widest row, otherwise pd.DataFrame raises ValueError.
        width = max(len(row) for row in data)
        data = [row + [''] * (width - len(row)) for row in data]

        # Synthesize column names; the first column is normally the report date.
        column_names = [f'列{i+1}' for i in range(width)]
        column_names[0] = '报告日期'

        df = pd.DataFrame(data, columns=column_names)

        # Basic cleaning: treat "--" as missing and drop all-empty rows.
        df = df.replace('--', pd.NA)
        df = df.dropna(how='all')

        print(f"成功获取 {len(df)} 条数据")
        return df

    except Exception as e:
        print(f"获取数据时出错：{e}")
        return None

def analyze_pe_data(df, stock_name):
    """
    Print a quick overview of a scraped DataFrame and try to identify which
    columns hold P/E-related data (by column name or by cell contents).
    """
    if df is None or df.empty:
        print("无数据可分析")
        return

    print(f"\n=== {stock_name} 数据分析 ===")
    print(f"数据形状: {df.shape}")
    print(f"列名: {df.columns.tolist()}")

    print("\n前5行数据:")
    print(df.head())

    # A column is a P/E candidate when its name contains one of these
    # keywords, or when any of its first few values mentions "市盈".
    keywords = ('市盈', 'PE', '估值', '比率')
    pe_columns = [
        col for col in df.columns
        if any(kw in col for kw in keywords)
        or any('市盈' in str(val) for val in df[col].head().values)
    ]

    if pe_columns:
        print(f"\n可能的市盈率列: {pe_columns}")
    else:
        print("\n未明确识别出市盈率列，所有列的数据类型:")
        for col in df.columns:
            print(f"  {col}: {df[col].dtype}")

# Usage example / self-test entry point.
if __name__ == "__main__":
    # First inspect the page structure to aid debugging.
    print("正在调试网页结构...")
    debug_webpage_structure("600519")

    print("\n" + "="*50)

    # Fetch data with the simplified, more robust method.
    stock_code = "600519"
    pe_data = get_stock_pe_ratio_simple(stock_code)

    if pe_data is not None:
        print(f"\n贵州茅台({stock_code})数据获取成功！")
        print(f"数据形状: {pe_data.shape}")
        print(f"列名: {pe_data.columns.tolist()}")

        # Persist the data; utf-8-sig keeps Excel happy with Chinese text.
        filename = f"{stock_code}_pe_ratio.csv"
        pe_data.to_csv(filename, encoding='utf-8-sig', index=False)
        # Bug fix: print the actual filename instead of a stray placeholder.
        print(f"数据已保存到 {filename}")

        # Analyze the fetched data.
        analyze_pe_data(pe_data, "贵州茅台")
    else:
        print("获取数据失败")

    # Exercise the fetcher against a couple of other stocks.
    print("\n" + "="*50)
    print("测试其他股票...")

    test_stocks = ['000858', '600036']  # Wuliangye, China Merchants Bank

    for code in test_stocks:
        data = get_stock_pe_ratio_simple(code)
        if data is not None:
            filename = f"{code}_pe_data.csv"
            data.to_csv(filename, encoding='utf-8-sig', index=False)
            print(f"股票 {code} 数据已保存，共 {len(data)} 条记录")
        else:
            print(f"股票 {code} 数据获取失败")
        time.sleep(1)  # throttle: 1-second delay between requests