# -*- coding: utf-8 -*-
# Author   : ZhangQing
# Time     : 2025-07-08 6:43
# File     : rust_bridge.py
# Project  : risk-contagion-analysis
# Desc     :

import pandas as pd
import numpy as np
import networkx as nx
import statsmodels.api as sm
import logging
from sqlalchemy import create_engine


class PathwayAnalyzer:
    """Analyzes risk-contagion pathways across a supply-chain network.

    Loads relationship and financial data from a SQL database, builds a
    directed supplier->customer transaction graph, and runs spillover /
    mediation regressions on top of it.
    """

    def __init__(self, config):
        """Initialize the pathway analyzer.

        Args:
            config: configuration mapping; must provide 'database_uri'
                (a SQLAlchemy connection string) used to create the
                database engine.
        """
        self.config = config
        # Logger is created first so later failures can be reported.
        self.logger = self._setup_logger()
        self.db_engine = create_engine(config['database_uri'])

    def _setup_logger(self):
        """设置日志记录器"""
        logger = logging.getLogger("PathwayAnalyzer")
        logger.setLevel(logging.INFO)
        handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        return logger

    def load_supply_chain_data(self):
        """加载供应链关系数据

        Returns:
            DataFrame: 包含供应链关系的数据框
        """
        self.logger.info("加载供应链关系数据...")

        try:
            # 从数据库加载供应链关系
            query = """
            SELECT 
                supplier_code, 
                customer_code, 
                transaction_amount,
                year
            FROM supply_chain_relationships
            """
            supply_chain_df = pd.read_sql(query, self.db_engine)
            self.logger.info(f"成功加载供应链数据，记录数: {len(supply_chain_df)}")
            return supply_chain_df
        except Exception as e:
            self.logger.error(f"加载供应链数据失败: {str(e)}")
            # 返回空数据框
            return pd.DataFrame(columns=['supplier_code', 'customer_code', 'transaction_amount', 'year'])

    def load_financial_data(self):
        """加载企业财务数据

        Returns:
            DataFrame: 包含企业财务数据的数据框
        """
        self.logger.info("加载企业财务数据...")

        try:
            # 从数据库加载财务数据
            query = """
            SELECT 
                stock_code,
                year,
                tobins_q,
                debt_cost,
                vol_npgr,
                revenue,
                total_assets,
                roa,
                leverage,
                firm_age,
                board_size
            FROM financial_indicators
            """
            financial_df = pd.read_sql(query, self.db_engine)
            self.logger.info(f"成功加载财务数据，记录数: {len(financial_df)}")
            return financial_df
        except Exception as e:
            self.logger.error(f"加载财务数据失败: {str(e)}")
            # 返回空数据框
            return pd.DataFrame()

    def build_supply_chain_network(self, supply_chain_df, year=None):
        """Build the directed supplier->customer transaction network.

        Args:
            supply_chain_df: relationship records with supplier_code,
                customer_code, transaction_amount and year columns.
            year: when given, restrict the network to that year's records;
                otherwise use all years.

        Returns:
            nx.DiGraph: edges run supplier -> customer with 'weight' set
            to transaction_amount (a repeated pair keeps the last amount
            encountered, as before).
        """
        self.logger.info(f"构建供应链网络，{'全部年份' if year is None else f'年份: {year}'}")

        # Optionally narrow to a single year.
        frame = supply_chain_df if year is None else supply_chain_df[supply_chain_df['year'] == year]

        # Bulk edge insertion instead of a per-row loop; add_weighted_edges_from
        # stores the third tuple element under the 'weight' attribute.
        graph = nx.DiGraph()
        graph.add_weighted_edges_from(
            zip(frame['supplier_code'], frame['customer_code'], frame['transaction_amount'])
        )

        self.logger.info(f"供应链网络构建完成，节点数: {graph.number_of_nodes()}, 边数: {graph.number_of_edges()}")
        return graph

    def calculate_network_measures(self, G, risk_exposure_df):
        """Compute node-level network measures and join them onto the
        risk-exposure frame.

        Args:
            G: supply-chain DiGraph (edges supplier -> customer with a
                'weight' edge attribute).
            risk_exposure_df: per-firm frame keyed by stock_code.

        Returns:
            DataFrame: copy of risk_exposure_df with in_degree, out_degree,
            pagerank, betweenness, upstream_ratio and downstream_ratio
            added; firms absent from the network get 0.
        """
        self.logger.info("计算网络指标...")

        # Weighted degree centralities.
        in_degree = dict(G.in_degree(weight='weight'))
        out_degree = dict(G.out_degree(weight='weight'))

        # PageRank as a proxy for a firm's importance in the network.
        pagerank = nx.pagerank(G, weight='weight')

        # Betweenness is expensive, so sample at most 500 pivot nodes.
        # NOTE(review): betweenness treats 'weight' as a *distance*, so a
        # heavier trade link counts as a longer path — confirm that is the
        # intended reading of transaction_amount.
        try:
            # Fix: k=0 (empty graph) is invalid for sampling; use full
            # computation (k=None) in that degenerate case.
            k = min(500, G.number_of_nodes()) or None
            betweenness = nx.betweenness_centrality(G, weight='weight', k=k)
        except Exception as e:
            self.logger.warning(f"计算介数中心性时出错，使用近似计算: {str(e)}")
            # Fix: current-flow betweenness is only defined for undirected
            # graphs, so the previous fallback raised on this DiGraph.
            # Project to undirected first; if the approximation still fails
            # (e.g. disconnected graph), fall back to zeros rather than crash.
            try:
                betweenness = nx.approximate_current_flow_betweenness_centrality(
                    G.to_undirected(), weight='weight')
            except Exception as e2:
                self.logger.warning(f"近似介数中心性计算失败，使用零值: {str(e2)}")
                betweenness = dict.fromkeys(G, 0.0)

        result_df = risk_exposure_df.copy()

        # Map measures onto firms; firms not in the network get 0.
        result_df['in_degree'] = result_df['stock_code'].map(in_degree).fillna(0)
        result_df['out_degree'] = result_df['stock_code'].map(out_degree).fillna(0)
        result_df['pagerank'] = result_df['stock_code'].map(pagerank).fillna(0)
        result_df['betweenness'] = result_df['stock_code'].map(betweenness).fillna(0)

        # Upstream vs downstream linkage shares; a zero total degree is
        # replaced by 1 to avoid division by zero (ratio becomes 0/1 = 0).
        total_degree = (result_df['in_degree'] + result_df['out_degree']).replace(0, 1)
        result_df['upstream_ratio'] = result_df['out_degree'] / total_degree
        result_df['downstream_ratio'] = result_df['in_degree'] / total_degree

        self.logger.info("网络指标计算完成")
        return result_df

    def analyze_supply_chain_spillover(self, network_risk_df, financial_df):
        """Estimate how network risk exposure spills over into firm value.

        Fits five OLS models (all with firm-clustered standard errors) of
        Tobin's Q on standardized net risk exposure, its interactions with
        the upstream/downstream linkage ratios, and a "second-level"
        network risk term.

        Args:
            network_risk_df: per-firm risk exposure plus the network
                measures from calculate_network_measures; must contain
                stock_code, year, net_risk_exposure, in_degree, out_degree,
                pagerank, betweenness, upstream_ratio, downstream_ratio.
            financial_df: firm financials keyed by (stock_code, year).

        Returns:
            dict: {'results': per-model params/p-values/R^2,
                   'key_findings': headline coefficients and p-values,
                   'data': the merged, standardized analysis frame}.
        """
        self.logger.info("分析供应链溢出效应...")

        # Inner-join so only firm-years present in both sources are used.
        merged_df = pd.merge(
            network_risk_df,
            financial_df,
            on=['stock_code', 'year'],
            how='inner'
        )

        # Standardize the focal regressors (z-scores) so coefficients are
        # comparable across models.
        from sklearn.preprocessing import StandardScaler

        scale_vars = ['net_risk_exposure', 'in_degree', 'out_degree',
                      'pagerank', 'betweenness', 'upstream_ratio', 'downstream_ratio']

        scaler = StandardScaler()
        merged_df[scale_vars] = scaler.fit_transform(merged_df[scale_vars])

        # Firm-level control variables used in every model.
        controls = ['revenue', 'total_assets', 'roa', 'leverage', 'firm_age', 'board_size']

        # Model 1: baseline — risk exposure on firm value (Tobin's Q).
        X = sm.add_constant(merged_df[['net_risk_exposure'] + controls])
        y = merged_df['tobins_q']

        model1 = sm.OLS(y, X).fit(cov_type='cluster', cov_kwds={'groups': merged_df['stock_code']})

        # Model 2: upstream (supplier-side) interaction effect.
        interaction1 = merged_df['net_risk_exposure'] * merged_df['upstream_ratio']
        merged_df['risk_upstream'] = interaction1

        X2 = sm.add_constant(merged_df[['net_risk_exposure', 'upstream_ratio', 'risk_upstream'] + controls])
        model2 = sm.OLS(y, X2).fit(cov_type='cluster', cov_kwds={'groups': merged_df['stock_code']})

        # Model 3: downstream (customer-side) interaction effect.
        interaction2 = merged_df['net_risk_exposure'] * merged_df['downstream_ratio']
        merged_df['risk_downstream'] = interaction2

        X3 = sm.add_constant(merged_df[['net_risk_exposure', 'downstream_ratio', 'risk_downstream'] + controls])
        model3 = sm.OLS(y, X3).fit(cov_type='cluster', cov_kwds={'groups': merged_df['stock_code']})

        # Model 4: both interactions together.
        X4 = sm.add_constant(merged_df[['net_risk_exposure',
                                        'upstream_ratio', 'risk_upstream',
                                        'downstream_ratio', 'risk_downstream'] + controls])
        model4 = sm.OLS(y, X4).fit(cov_type='cluster', cov_kwds={'groups': merged_df['stock_code']})

        # Second-level (two-hop) network spillover.
        self.logger.info("分析二级网络溢出效应...")

        # NOTE(review): this lookup is almost certainly broken. Filtering
        # network_risk_df on stock_code == company and then reading back
        # the 'stock_code' column yields the company itself, not its
        # trading partners. Hence first_level == [company] (repeated per
        # row), second_level is empty after the set subtraction below, and
        # second_level_risk ends up 0 for EVERY firm — making model5's
        # second_level_risk column constant (collinear with the constant).
        # Computing real one-/two-hop neighbors requires the supply-chain
        # graph, which this method is not given; confirm intent and thread
        # the DiGraph through if true two-hop risk is wanted.
        second_level_risk = {}

        # For each firm in the merged sample...
        for company in merged_df['stock_code'].unique():
            if company in network_risk_df['stock_code'].values:
                # Intended: the firm's direct (first-level) partners.
                first_level = [n for n in
                               network_risk_df[(network_risk_df['stock_code'] == company)]['stock_code'].values]

                # Intended: partners-of-partners (second level).
                second_level = []
                for first in first_level:
                    second_level.extend(
                        [n for n in network_risk_df[(network_risk_df['stock_code'] == first)]['stock_code'].values])

                # Drop duplicates, direct partners, and the firm itself.
                second_level = list(set(second_level) - set(first_level) - {company})

                # Mean risk exposure over the remaining two-hop firms.
                if second_level:
                    second_level_risk[company] = network_risk_df[
                        network_risk_df['stock_code'].isin(second_level)
                    ]['net_risk_exposure'].mean()
                else:
                    second_level_risk[company] = 0
            else:
                second_level_risk[company] = 0

        # Attach the (currently always-zero, see NOTE above) measure.
        merged_df['second_level_risk'] = merged_df['stock_code'].map(second_level_risk)

        # Model 5: second-level network effect.
        X5 = sm.add_constant(merged_df[['net_risk_exposure', 'second_level_risk'] + controls])
        model5 = sm.OLS(y, X5).fit(cov_type='cluster', cov_kwds={'groups': merged_df['stock_code']})

        # Collect per-model estimates.
        results = {
            'basic_model': {
                'params': model1.params.to_dict(),
                'pvalues': model1.pvalues.to_dict(),
                'rsquared': model1.rsquared,
                'rsquared_adj': model1.rsquared_adj,
                'fvalue': model1.fvalue,
                'f_pvalue': model1.f_pvalue
            },
            'upstream_model': {
                'params': model2.params.to_dict(),
                'pvalues': model2.pvalues.to_dict(),
                'rsquared': model2.rsquared
            },
            'downstream_model': {
                'params': model3.params.to_dict(),
                'pvalues': model3.pvalues.to_dict(),
                'rsquared': model3.rsquared
            },
            'combined_model': {
                'params': model4.params.to_dict(),
                'pvalues': model4.pvalues.to_dict(),
                'rsquared': model4.rsquared
            },
            'second_level_model': {
                'params': model5.params.to_dict(),
                'pvalues': model5.pvalues.to_dict(),
                'rsquared': model5.rsquared
            }
        }

        # Headline coefficients for quick reporting.
        key_findings = {
            'risk_impact': model1.params['net_risk_exposure'],
            'risk_pvalue': model1.pvalues['net_risk_exposure'],
            'upstream_effect': model2.params['risk_upstream'],
            'upstream_pvalue': model2.pvalues['risk_upstream'],
            'downstream_effect': model3.params['risk_downstream'],
            'downstream_pvalue': model3.pvalues['risk_downstream'],
            'second_level_effect': model5.params['second_level_risk'],
            'second_level_pvalue': model5.pvalues['second_level_risk'],
        }

        self.logger.info("供应链溢出效应分析完成")
        self.logger.info(
            f"主要发现: 风险对企业价值的影响系数 = {key_findings['risk_impact']:.4f} (p={key_findings['risk_pvalue']:.4f})")
        self.logger.info(
            f"下游客户关联度系数 = {key_findings['downstream_effect']:.4f} (p={key_findings['downstream_pvalue']:.4f})")
        self.logger.info(
            f"上游供应商关联度系数 = {key_findings['upstream_effect']:.4f} (p={key_findings['upstream_pvalue']:.4f})")
        self.logger.info(
            f"二级网络溢出效应系数 = {key_findings['second_level_effect']:.4f} (p={key_findings['second_level_pvalue']:.4f})")

        return {
            'results': results,
            'key_findings': key_findings,
            'data': merged_df
        }

    def analyze_reputation_mediation(self, network_risk_df, financial_df):
        """Test whether reputation costs mediate the risk -> value link.

        Baron-Kenny style mediation analysis with debt cost (debt_cost)
        and profit volatility (vol_npgr) as mediators, plus a Sobel test
        for each indirect path. All regressions use firm-clustered
        standard errors.

        Args:
            network_risk_df: per-firm risk exposure with network measures.
            financial_df: firm financials keyed by (stock_code, year).

        Returns:
            dict: {'results': per-stage estimates,
                   'key_findings': effect decomposition + Sobel stats,
                   'data': the merged analysis frame}.
        """
        self.logger.info("分析声誉折价中介效应...")

        # Keep only firm-years present in both sources.
        merged_df = pd.merge(
            network_risk_df,
            financial_df,
            on=['stock_code', 'year'],
            how='inner'
        )

        controls = ['revenue', 'total_assets', 'roa', 'leverage', 'firm_age']
        clusters = merged_df['stock_code']

        def fit_ols(dep, regressors):
            # One clustered-SE OLS fit of `dep` on a constant + regressors.
            design = sm.add_constant(merged_df[regressors])
            return sm.OLS(merged_df[dep], design).fit(
                cov_type='cluster', cov_kwds={'groups': clusters})

        # Stage 1: risk exposure -> debt cost (mediator a-path).
        m_debt = fit_ols('debt_cost', ['net_risk_exposure'] + controls)

        # Stage 2: risk exposure -> profit volatility (mediator a-path).
        m_vol = fit_ols('vol_npgr', ['net_risk_exposure'] + controls)

        # Stage 3: risk exposure + both mediators -> Tobin's Q.
        m_full = fit_ols('tobins_q', ['net_risk_exposure', 'debt_cost', 'vol_npgr'] + controls)

        # Benchmark: risk exposure -> Tobin's Q without the mediators.
        m_total = fit_ols('tobins_q', ['net_risk_exposure'] + controls)

        # Effect decomposition.
        direct_effect = m_full.params['net_risk_exposure']
        # Indirect path via debt cost: a (stage 1) * b (stage 3).
        indirect_effect1 = m_debt.params['net_risk_exposure'] * m_full.params['debt_cost']
        # Indirect path via profit volatility: a (stage 2) * b (stage 3).
        indirect_effect2 = m_vol.params['net_risk_exposure'] * m_full.params['vol_npgr']
        total_indirect_effect = indirect_effect1 + indirect_effect2
        total_effect = m_total.params['net_risk_exposure']
        # Guard against a zero total effect when forming the proportion.
        mediation_proportion = total_indirect_effect / total_effect if total_effect != 0 else 0

        # Sobel test (simplified normal-theory version).
        from scipy import stats

        def sobel(a, sea, b, seb):
            # Sobel z statistic and two-sided p-value for an a*b path.
            z = (a * b) / np.sqrt(b ** 2 * sea ** 2 + a ** 2 * seb ** 2)
            return z, 2 * (1 - stats.norm.cdf(abs(z)))

        sobel_statistic1, sobel_pvalue1 = sobel(
            m_debt.params['net_risk_exposure'], m_debt.bse['net_risk_exposure'],
            m_full.params['debt_cost'], m_full.bse['debt_cost'])
        sobel_statistic2, sobel_pvalue2 = sobel(
            m_vol.params['net_risk_exposure'], m_vol.bse['net_risk_exposure'],
            m_full.params['vol_npgr'], m_full.bse['vol_npgr'])

        # Per-stage estimates.
        results = {
            'stage1_debt_cost': {
                'params': m_debt.params.to_dict(),
                'pvalues': m_debt.pvalues.to_dict(),
                'rsquared': m_debt.rsquared
            },
            'stage2_vol_npgr': {
                'params': m_vol.params.to_dict(),
                'pvalues': m_vol.pvalues.to_dict(),
                'rsquared': m_vol.rsquared
            },
            'stage3_combined': {
                'params': m_full.params.to_dict(),
                'pvalues': m_full.pvalues.to_dict(),
                'rsquared': m_full.rsquared
            },
            'total_effect': {
                'params': m_total.params.to_dict(),
                'pvalues': m_total.pvalues.to_dict(),
                'rsquared': m_total.rsquared
            }
        }

        # Headline numbers.
        key_findings = {
            'direct_effect': direct_effect,
            'indirect_effect_debt': indirect_effect1,
            'indirect_effect_vol': indirect_effect2,
            'total_indirect_effect': total_indirect_effect,
            'total_effect': total_effect,
            'mediation_proportion': mediation_proportion,
            'sobel_statistic_debt': sobel_statistic1,
            'sobel_pvalue_debt': sobel_pvalue1,
            'sobel_statistic_vol': sobel_statistic2,
            'sobel_pvalue_vol': sobel_pvalue2
        }

        self.logger.info("声誉折价中介效应分析完成")
        self.logger.info(f"直接效应: {direct_effect:.4f}")
        self.logger.info(f"通过债务成本的间接效应: {indirect_effect1:.4f} (Sobel p={sobel_pvalue1:.4f})")
        self.logger.info(f"通过利润波动性的间接效应: {indirect_effect2:.4f} (Sobel p={sobel_pvalue2:.4f})")
        self.logger.info(f"中介效应比例: {mediation_proportion:.2%}")

        return {
            'results': results,
            'key_findings': key_findings,
            'data': merged_df
        }
