import baostock as bs
import pandas as pd
import time
import os
from datetime import datetime, timedelta

class HS300DataProcessor:
    """Fetch, clean and transform CSI 300 (HS300) constituent data via Baostock.

    Pipeline: download daily quotes and industry classifications for every
    index constituent, clean them, then derive node/edge CSV files suitable
    for building a stock-correlation graph. All artifacts are written under
    ``output_dir``.
    """

    def __init__(self, start_date, end_date, output_dir="data"):
        """Initialize the CSI 300 data processor.

        Args:
            start_date: first day to fetch, "YYYY-MM-DD".
            end_date: last day to fetch, "YYYY-MM-DD".
            output_dir: directory for all CSV artifacts; created if missing.
        """
        self.start_date = start_date
        self.end_date = end_date
        self.output_dir = output_dir
        self.hs300_codes = []  # unique constituent codes, set by get_hs300_codes()

        # Ensure the output directory exists (idempotent).
        os.makedirs(output_dir, exist_ok=True)
        print(f"数据将保存在目录: {os.path.abspath(self.output_dir)}")

    def login(self):
        """Log in to the Baostock service; raise on rejection."""
        lg = bs.login()
        if lg.error_code != '0':
            raise Exception(f"登录失败：{lg.error_msg}")
        print("Baostock登录成功")

    def logout(self):
        """Log out of the Baostock service."""
        bs.logout()
        print("Baostock退出成功")

    def get_hs300_codes(self):
        """Fetch the CSI 300 constituent list and cache the unique codes.

        Saves the full constituent table to hs300_stocks_full.csv.

        Returns:
            list[str]: unique stock codes (e.g. "sh.600000").

        Raises:
            Exception: if the query fails or returns no rows.
        """
        print("获取沪深300成分股列表...")
        rs = bs.query_hs300_stocks()
        if rs.error_code != '0':
            raise Exception(f"获取沪深300成分股失败: {rs.error_msg}")

        # Materialize the result set and deduplicate codes.
        result = pd.DataFrame(list(rs.data), columns=rs.fields)
        self.hs300_codes = result['code'].unique().tolist()

        if not self.hs300_codes:
            raise Exception("未获取到任何沪深300成分股数据")

        print(f"成功获取{len(self.hs300_codes)}只沪深300成分股")
        result.to_csv(os.path.join(self.output_dir, 'hs300_stocks_full.csv'),
                      encoding="utf-8-sig", index=False)
        return self.hs300_codes

    def fetch_daily_data(self):
        """Download daily close/volume bars for every cached constituent.

        Returns:
            pd.DataFrame: raw bars for all stocks (date, code, close, volume),
            also saved to hs300_daily_raw.csv.
        """
        print("开始获取日线数据...")
        daily_dfs = []
        for i, code in enumerate(self.hs300_codes):
            rs = bs.query_history_k_data_plus(
                code, "date,code,close,volume",
                start_date=self.start_date, end_date=self.end_date,
                frequency="d", adjustflag="3"  # "3" = unadjusted prices
            )
            daily_dfs.append(rs.get_data())

            # Progress report plus a short pause to avoid hammering the API.
            if (i + 1) % 50 == 0:
                print(f"已获取{i+1}/{len(self.hs300_codes)}只股票的日线数据")
                time.sleep(0.5)

        # Stack the per-stock frames into one long table.
        daily_all = pd.concat(daily_dfs, ignore_index=True)
        daily_all.to_csv(os.path.join(self.output_dir, 'hs300_daily_raw.csv'),
                         encoding="utf-8-sig", index=False)
        print("日线数据获取完成")
        return daily_all

    def fetch_industry_data(self):
        """Download the industry classification for every cached constituent.

        Failed per-stock queries are skipped (best effort).

        Returns:
            pd.DataFrame: one row per stock, columns ['code', 'industry'],
            saved to hs300_industry_raw.csv; empty frame if nothing fetched.
        """
        print("获取行业分类数据...")
        industry_list = []
        rs_fields = None

        for i, code in enumerate(self.hs300_codes):
            rs = bs.query_stock_industry(code=code)
            # Remember field names from the first response that carries them.
            if rs_fields is None and hasattr(rs, 'fields'):
                rs_fields = rs.fields

            # Collect rows only from successful queries.
            if rs.error_code == '0':
                while rs.next():
                    industry_list.append(rs.get_row_data())

            # Progress report plus a short pause to avoid hammering the API.
            if (i + 1) % 50 == 0:
                print(f"已获取{i+1}/{len(self.hs300_codes)}只股票的行业数据")
                time.sleep(0.5)

        if not industry_list:
            print("警告: 未获取到任何行业数据。")
            return pd.DataFrame()

        # One classification per stock: keep the first record for each code.
        industry_df = pd.DataFrame(industry_list, columns=rs_fields)
        hs300_industry = industry_df.drop_duplicates(subset=['code'], keep='first')[['code', 'industry']]
        hs300_industry.to_csv(os.path.join(self.output_dir, 'hs300_industry_raw.csv'),
                              encoding="utf-8-sig", index=False)
        print("行业数据获取完成")
        return hs300_industry

    def clean_daily_data(self, daily_raw):
        """Clean raw daily bars: typed columns, sorted, gaps filled.

        ``close`` is forward-filled within each stock; missing ``volume``
        becomes 0; rows whose close is still NaN (leading gaps with nothing
        to fill from) are dropped. Result saved to hs300_daily_clean.csv.

        Args:
            daily_raw: raw frame with string columns date/code/close/volume.

        Returns:
            pd.DataFrame: the cleaned frame (input is not modified).
        """
        print("清洗日线数据...")
        daily = daily_raw.copy()

        # Coerce types; unparseable numerics become NaN.
        daily['date'] = pd.to_datetime(daily['date'])
        daily['close'] = pd.to_numeric(daily['close'], errors='coerce')
        daily['volume'] = pd.to_numeric(daily['volume'], errors='coerce')

        # Sort so forward-fill runs in chronological order per stock.
        daily = daily.sort_values(by=['code', 'date'])
        daily['close'] = daily.groupby('code')['close'].ffill()
        daily['volume'] = daily['volume'].fillna(0)

        # Drop rows whose close could not be filled. (The original code also
        # re-filtered on codes already present in the frame — a no-op, removed.)
        daily_clean = daily.dropna(subset=['close'])

        daily_clean.to_csv(os.path.join(self.output_dir, 'hs300_daily_clean.csv'),
                           encoding="utf-8-sig", index=False)
        return daily_clean

    def clean_industry_data(self, industry_raw):
        """Drop rows with a missing or empty industry label.

        Saves the result to hs300_industry_clean.csv; an empty input frame
        is returned unchanged (nothing is written).
        """
        print("清洗行业数据...")
        if industry_raw.empty:
            return industry_raw

        # Keep only rows with a non-null, non-empty industry string.
        industry_clean = industry_raw[
            industry_raw['industry'].notna() &
            (industry_raw['industry'] != '')
        ].copy()

        industry_clean.to_csv(os.path.join(self.output_dir, 'hs300_industry_clean.csv'),
                              encoding="utf-8-sig", index=False)
        return industry_clean

    def build_graph_data(self, daily_clean, industry_clean):
        """Build graph node/edge CSVs from cleaned daily and industry data.

        Produces three files: graph_stock_nodes.csv (per-stock attributes:
        industry, mean turnover, volatility), graph_stock_links.csv (pairs
        with |correlation| > 0.6) and graph_industry_links.csv (stock ->
        industry membership edges). Neither input frame is modified.
        """
        print("构建图模型数据 (使用平均成交额替代市值)...")

        # Work on a copy so the caller's frame is never mutated.
        daily = daily_clean.copy()

        # --- 1. Node attribute: mean daily turnover (proxy for size) ---
        daily['turnover'] = daily['close'] * daily['volume']  # turnover = close * volume
        avg_turnover = daily.groupby('code')['turnover'].mean().reset_index()
        avg_turnover['avg_turnover_billion'] = avg_turnover['turnover'] / 100000000  # CNY -> 1e8 (亿)

        # --- 2. Node attribute: close-price standard deviation (risk proxy) ---
        close_pivot = daily.pivot(index='date', columns='code', values='close')
        volatility = close_pivot.std().reset_index()
        volatility.columns = ['code', 'volatility']  # fixed names across pandas versions

        # --- 3. Merge node attributes and deduplicate ---
        stock_nodes = pd.merge(industry_clean, avg_turnover[['code', 'avg_turnover_billion']],
                               on='code', how='inner')
        stock_nodes = pd.merge(stock_nodes, volatility, on='code', how='inner')
        # Guarantee one row per stock code.
        stock_nodes.drop_duplicates(subset=['code'], keep='first', inplace=True)

        # --- 4. Save the node file ---
        stock_nodes.rename(columns={'code': 'stock_code'}, inplace=True)
        stock_nodes.to_csv(os.path.join(self.output_dir, 'graph_stock_nodes.csv'),
                           encoding="utf-8-sig", index=False)
        print("已生成节点文件: graph_stock_nodes.csv")

        # --- 5. Correlation matrix over the surviving node set ---
        print("正在计算股票相关性，这可能需要一些时间...")
        all_node_codes = stock_nodes['stock_code'].unique()

        # Copy the column slice before dropping rows so we never mutate a view
        # of close_pivot (avoids the SettingWithCopy hazard of the original).
        close_pivot_filtered = close_pivot[all_node_codes].copy()
        close_pivot_filtered.dropna(how='all', inplace=True)  # drop all-NaN rows

        # Explicit axis names prevent column-name clashes when stacking later.
        close_pivot_filtered.index.name = 'date'
        close_pivot_filtered.columns.name = 'code'

        corr_matrix = close_pivot_filtered.corr()
        print("相关性矩阵计算完成。")

        # --- 6. Long-format edge list from the correlation matrix ---
        # Strip axis names so reset_index yields unambiguous default columns.
        corr_matrix = corr_matrix.rename_axis(None, axis=0).rename_axis(None, axis=1)
        corr_long = corr_matrix.stack().reset_index()
        corr_long.columns = ['stock1', 'stock2', 'corr']

        # Keep strongly correlated pairs (|corr| > 0.6); the stock1 < stock2
        # ordering drops self-loops and the mirrored duplicate of each pair.
        strong_corr = corr_long[
            (corr_long['corr'].abs() > 0.6) &
            (corr_long['stock1'] < corr_long['stock2'])
        ].copy()

        # Tag each edge with the industry of its first endpoint.
        stock_to_industry = stock_nodes.set_index('stock_code')['industry']
        strong_corr['industry'] = strong_corr['stock1'].map(stock_to_industry)

        strong_corr.to_csv(os.path.join(self.output_dir, 'graph_stock_links.csv'),
                           encoding="utf-8-sig", index=False)
        print("已生成股票关联边文件: graph_stock_links.csv")

        # --- 7. Stock -> industry membership edges ---
        industry_links = stock_nodes[['stock_code', 'industry']].copy()
        industry_links['weight'] = 1  # membership edges carry unit weight
        industry_links.to_csv(os.path.join(self.output_dir, 'graph_industry_links.csv'),
                              encoding="utf-8-sig", index=False)
        print("已生成股票行业归属边文件: graph_industry_links.csv")
        print("图模型数据构建完成")

    def run_pipeline(self):
        """Run the full pipeline: login, fetch, clean, build graph, logout.

        Errors are reported on stdout rather than propagated; the Baostock
        session is always closed.
        """
        try:
            self.login()
            self.get_hs300_codes()

            # Fetch daily bars and verify we actually got rows.
            daily_raw = self.fetch_daily_data()
            if daily_raw.empty:
                raise Exception("获取到的日线数据为空，请检查网络连接或日期范围是否有交易日。")

            industry_raw = self.fetch_industry_data()

            daily_clean = self.clean_daily_data(daily_raw)
            industry_clean = self.clean_industry_data(industry_raw)

            # Both cleaned datasets must be non-empty to build the graph.
            if daily_clean.empty or industry_clean.empty:
                raise Exception("数据清洗后存在空数据集，无法继续处理。")

            self.build_graph_data(daily_clean, industry_clean)
            print("\n所有数据处理完成，结果保存在", os.path.abspath(self.output_dir))

        except Exception as e:
            print(f"\n处理过程出错：{str(e)}")
        finally:
            self.logout()


if __name__ == "__main__":
    # Time window: the past year, ending two days before today. If that
    # end date falls on a holiday, Baostock simply returns data up to the
    # nearest preceding trading day.
    fmt = "%Y-%m-%d"
    window_end = datetime.now() - timedelta(days=2)
    span = (
        (window_end - timedelta(days=365)).strftime(fmt),
        window_end.strftime(fmt),
    )

    print(f"数据获取时间范围: {span[0]} to {span[1]}")

    # Build the processor and execute the whole pipeline.
    HS300DataProcessor(
        start_date=span[0],
        end_date=span[1],
        output_dir="hs300_data",
    ).run_pipeline()