import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import shutil
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score
from torch_geometric.data import Data
from torch_geometric.nn import GINConv, MLP


# 定义注意力融合模块，用于融合图网络、金融数据和文本数据的特征
class AttentionFusion(nn.Module):
    """Attention-based fusion of graph, financial, and text embeddings.

    Learns per-sample softmax weights over the three input channels and
    returns their weighted sum, keeping the feature dimension unchanged.
    """

    def __init__(self, dim):
        super().__init__()
        # Score head: maps the concatenated channels to three softmax weights.
        self.attention = nn.Sequential(
            nn.Linear(dim * 3, dim),
            nn.Tanh(),
            nn.Linear(dim, 3),
            nn.Softmax(dim=1)
        )

    def forward(self, gin_out, fin_out, text_out):
        # (N, 3*dim): the scorer sees all three channels at once.
        scorer_input = torch.cat([gin_out, fin_out, text_out], dim=1)
        channel_weights = self.attention(scorer_input)  # (N, 3)
        # (N, dim, 3): one slice per channel, weighted then reduced over channels.
        channels = torch.stack([gin_out, fin_out, text_out], dim=2)
        return (channels * channel_weights.unsqueeze(1)).sum(dim=2)


# 定义门控机制，用于增强特征表达
class Gate(nn.Module):
    """Element-wise gating: scales features by a learned sigmoid mask in (0, 1)."""

    def __init__(self, dim):
        super().__init__()
        # Produces a per-feature multiplier; sigmoid keeps it in (0, 1).
        self.gate = nn.Sequential(
            nn.Linear(dim, dim),
            nn.Sigmoid()
        )

    def forward(self, x):
        # The multiplicative gate lets the network suppress or pass features.
        mask = self.gate(x)
        return mask * x


# 定义改进的三通道融合GIN模型，整合图网络、金融数据和文本数据
class ImprovedTripleChannelFusionGIN(nn.Module):
    """Three-channel fusion model for per-node binary classification.

    Channels:
      * graph:     three stacked GINConv layers over node attributes,
        with a residual connection around the middle layer;
      * financial: gated two-layer MLP over tabular features;
      * text:      gated two-layer MLP over TF-IDF features.

    The channels (each 32-dim) are combined by ``AttentionFusion`` and fed
    to a small classifier head that emits one logit per node.
    """

    def __init__(self, attr_dim, fin_dim, text_dim, hidden_dim=64):
        super().__init__()
        # --- Graph channel: GINConv layers, each wrapping its own MLP ---
        self.mlp1 = MLP([attr_dim, hidden_dim, hidden_dim], norm='batch_norm', dropout=0.2)
        self.gin1 = GINConv(self.mlp1)
        self.mlp2 = MLP([hidden_dim, hidden_dim, hidden_dim], norm='batch_norm', dropout=0.2)
        self.gin2 = GINConv(self.mlp2)
        self.mlp3 = MLP([hidden_dim, hidden_dim, 32], norm='batch_norm', dropout=0.2)
        self.gin3 = GINConv(self.mlp3)
        self.gin_bn1 = nn.BatchNorm1d(hidden_dim)
        self.gin_bn2 = nn.BatchNorm1d(hidden_dim)
        self.gin_dropout = nn.Dropout(0.2)

        # --- Financial channel: MLP down to 32 dims, then a learned gate ---
        self.fin_mlp = nn.Sequential(
            nn.Linear(fin_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.LeakyReLU(),
            nn.Dropout(0.2),
            nn.Linear(hidden_dim, 32)
        )
        self.fin_gate = Gate(32)

        # --- Text channel: same shape as the financial branch ---
        self.text_mlp = nn.Sequential(
            nn.Linear(text_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.LeakyReLU(),
            nn.Dropout(0.2),
            nn.Linear(hidden_dim, 32)
        )
        self.text_gate = Gate(32)

        # Attention-weighted combination of the three 32-dim channels.
        self.fusion = AttentionFusion(32)

        # Classifier head: fused features -> single logit.
        self.classifier = nn.Sequential(
            nn.BatchNorm1d(32),
            nn.LeakyReLU(),
            nn.Dropout(0.2),
            nn.Linear(32, 16),
            nn.LeakyReLU(),
            nn.Linear(16, 1)
        )

    def forward(self, attr_x, edge_index, fin_x, text_x):
        # Graph channel: layer 1, then layer 2 with a residual skip, then layer 3.
        h = F.relu(self.gin_bn1(self.gin1(attr_x, edge_index)))
        residual = self.gin_dropout(h)
        h = F.relu(self.gin_bn2(self.gin2(residual, edge_index))) + residual
        graph_feat = self.gin3(h, edge_index)

        # Gated MLP branches for the tabular and text inputs.
        fin_feat = self.fin_gate(self.fin_mlp(fin_x))
        text_feat = self.text_gate(self.text_mlp(text_x))

        # Fuse the three channels and classify.
        fused = self.fusion(graph_feat, fin_feat, text_feat)
        return self.classifier(fused)


# 加载和预处理数据
def load_data():
    """Load relationship/attribute/label/financial/TF-IDF data into tensors.

    Returns:
        data: PyG ``Data`` with node attributes ``x`` (num_nodes, attr_dim),
            ``edge_index`` (2, E), and labels ``y`` (num_nodes, 1) where
            unlabeled nodes carry -1.
        x_fin_tensor: standardized financial features, zero rows for nodes
            without a (complete) financial record.
        x_text_tensor: TF-IDF features, zero rows for nodes without text.
        label_df: label rows restricted to graph nodes, with ``node_idx``.
    """
    edge_df = pd.read_excel("net_train_data_relationship.xlsx")
    attr_df = pd.read_excel("net_train_data_attribute.xlsx")
    label_df = pd.read_excel("label_train_data.xlsx")
    fin_df = pd.read_excel("fin_train_data.xlsx")
    tfidf_df = pd.read_csv("tfidf_train_features.csv", index_col=0)

    # Company-id -> contiguous node index, over every id seen in the edges.
    company_ids = sorted(set(edge_df['company_id']) | set(edge_df['related_company_id']))
    cid_map = {cid: i for i, cid in enumerate(company_ids)}
    num_nodes = len(cid_map)

    # Edge index (2, E) in node-index space.
    edge_index = torch.tensor([
        [cid_map[c] for c in edge_df['company_id']],
        [cid_map[c] for c in edge_df['related_company_id']]
    ], dtype=torch.long)

    # Attribute matrix aligned to node indices. BUGFIX: the original stacked
    # the surviving rows of ``dropna()`` directly, so row i of x no longer
    # corresponded to node i (as referenced by edge_index and y) whenever any
    # attribute row was filtered out. Rows are now written at their node
    # index; nodes without attributes stay at zero, matching the fin/text
    # channels. ``.copy()`` avoids SettingWithCopyWarning on the slice.
    attr_df = attr_df[attr_df['company_id'].isin(cid_map)].copy()
    attr_df['node_idx'] = attr_df['company_id'].map(cid_map).astype(int)
    attr_df = attr_df.dropna()
    attr_dim = attr_df.drop(columns=['company_id', 'node_idx']).shape[1]
    x_attr_all = np.zeros((num_nodes, attr_dim), dtype=np.float32)
    for _, row in attr_df.iterrows():
        x_attr_all[int(row['node_idx'])] = row.drop(['company_id', 'node_idx']).to_numpy(dtype=np.float32)
    x_attr = torch.tensor(x_attr_all, dtype=torch.float)

    # Financial features: index-aligned zero-filled matrix, then standardized.
    # NOTE(review): the scaler is fit on the zero-filled matrix, so absent
    # rows influence the statistics — presumably intentional; confirm.
    fin_df = fin_df[fin_df['company_id'].isin(cid_map)].copy()
    fin_df['node_idx'] = fin_df['company_id'].map(cid_map).astype(int)
    fin_df = fin_df.dropna()
    fin_dim = fin_df.drop(columns=['company_id', 'node_idx']).shape[1]
    x_fin_all = np.zeros((num_nodes, fin_dim))
    for _, row in fin_df.iterrows():
        idx = int(row['node_idx'])
        x_fin_all[idx] = row.drop(['company_id', 'node_idx']).to_numpy(dtype=np.float32)
    x_fin_all = StandardScaler().fit_transform(x_fin_all)
    x_fin_tensor = torch.tensor(x_fin_all, dtype=torch.float)

    # TF-IDF features: index-aligned zero-filled matrix keyed by company id.
    tfidf_dim = tfidf_df.shape[1]
    x_text_all = np.zeros((num_nodes, tfidf_dim))
    for cid, vec in tfidf_df.iterrows():
        if cid in cid_map:
            x_text_all[cid_map[cid]] = vec.values
    x_text_tensor = torch.tensor(x_text_all, dtype=torch.float)

    # Labels: -1 marks unlabeled nodes so training can mask them out.
    label_df = label_df[label_df['company_id'].isin(cid_map)].copy()
    label_df['node_idx'] = label_df['company_id'].map(cid_map)
    label_tensor = torch.full((num_nodes, 1), -1.0)
    for _, row in label_df.iterrows():
        label_tensor[int(row['node_idx'])] = row['label']

    # Wrap everything in a PyG graph object.
    data = Data(x=x_attr, edge_index=edge_index, y=label_tensor)
    return data, x_fin_tensor, x_text_tensor, label_df


# 训练模型
def train_model(data, x_fin_tensor, x_text_tensor, label_df, device):
    """Train the fusion model full-batch with early stopping on AUC.

    The best-scoring checkpoint is written to ``model_triplechannel_best.pth``.
    Returns the checkpoint path and the best AUC observed.

    NOTE(review): AUC is computed on the same labeled nodes the loss is
    trained on — there is no held-out validation split here.
    """
    # Move every input tensor to the target device up front.
    data = data.to(device)
    x_fin_tensor = x_fin_tensor.to(device)
    x_text_tensor = x_text_tensor.to(device)

    # Indices of nodes carrying a real label (y != -1).
    labeled_idx = (data.y[:, 0] != -1).nonzero(as_tuple=True)[0]

    # Build the model from the observed feature dimensions.
    model = ImprovedTripleChannelFusionGIN(
        attr_dim=data.x.shape[1],
        fin_dim=x_fin_tensor.shape[1],
        text_dim=x_text_tensor.shape[1]
    ).to(device)

    optimizer = torch.optim.AdamW(model.parameters(), lr=0.001, weight_decay=1e-5)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50)
    # Heavy positive weight compensates for class imbalance in the labels.
    loss_fn = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([10.0], device=device))

    best_auc = 0
    patience = 20
    stale_epochs = 0

    for epoch in range(300):
        # --- one full-batch optimization step ---
        model.train()
        out = model(data.x, data.edge_index, x_fin_tensor, x_text_tensor)
        loss = loss_fn(out[labeled_idx], data.y[labeled_idx])

        optimizer.zero_grad()
        loss.backward()
        # Gradient clipping keeps the full-batch updates stable.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()

        # --- evaluate AUC on the labeled nodes ---
        model.eval()
        with torch.no_grad():
            logits = model(data.x, data.edge_index, x_fin_tensor, x_text_tensor)[labeled_idx]
            probs = torch.sigmoid(logits).cpu().numpy()
            auc = roc_auc_score(data.y[labeled_idx].cpu().numpy(), probs)

        if epoch % 10 == 0:
            print(f"Epoch {epoch + 1}/300, Loss: {loss.item():.4f}, AUC: {auc:.4f}")

        # Checkpoint on improvement; stop after `patience` stale epochs.
        if auc > best_auc:
            best_auc = auc
            stale_epochs = 0
            torch.save(model.state_dict(), "model_triplechannel_best.pth")
        else:
            stale_epochs += 1
            if stale_epochs >= patience:
                print(f"Early stopping at epoch {epoch + 1}. Best AUC: {best_auc:.4f}")
                break

    print(f"最佳模型保存为: model_triplechannel_best.pth")
    return "model_triplechannel_best.pth", best_auc


if __name__ == "__main__":
    # Select the compute device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load the graph, side-channel features, and label frame.
    data, x_fin_tensor, x_text_tensor, label_df = load_data()

    # Train and keep the checkpoint with the best AUC.
    best_model_path, best_auc = train_model(data, x_fin_tensor, x_text_tensor, label_df, device)
    print(f"\nBest model AUC: {best_auc:.4f}")

    # Rebuild the architecture and restore the best weights.
    # map_location keeps loading robust if the checkpoint was written on a
    # different device than the one available now.
    model = ImprovedTripleChannelFusionGIN(
        attr_dim=data.x.shape[1],
        fin_dim=x_fin_tensor.shape[1],
        text_dim=x_text_tensor.shape[1]
    ).to(device)
    model.load_state_dict(torch.load(best_model_path, map_location=device))
    model.eval()

    # Score the labeled nodes. `nonzero` yields node indices in ascending
    # order, so probs[i] belongs to the i-th smallest labeled node index.
    labeled_idx = (data.y[:, 0] != -1).nonzero(as_tuple=True)[0]
    with torch.no_grad():
        logits = model(data.x.to(device), data.edge_index.to(device), x_fin_tensor.to(device), x_text_tensor.to(device))[labeled_idx]
        probs = torch.sigmoid(logits).squeeze().cpu().numpy()

    # BUGFIX: the original paired `probs` (ordered by node index) with
    # `label_df` in its original file-row order, so probabilities were
    # attached to the wrong companies whenever the label file was not
    # already sorted by node index. Sorting by `node_idx` (added by
    # load_data) makes the two orderings agree.
    ordered_labels = label_df.sort_values('node_idx')
    results_df = pd.DataFrame({
        "Company_id": ordered_labels["company_id"].values,
        "Probability": probs
    })
    results_df.to_csv("results_improved_triplechannel_gin_2022215030_朱建宇.csv", index=False)
    print("Improved triple channel GIN model results saved as: results_improved_triplechannel_gin_2022215030_朱建宇.csv")