import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from app.database import db
from app.logic.utils.ast import gen_ast
from app.logic.utils.csg import gen_csg
from app.logic.utils.filepath import FILE_MODEL_DIR
from app.logic.utils.patch_analysis import PatchAnalysis
from app.logic.utils.pdg import gen_pdg
from app.logic.utils.pre import gen_preprocessor
from app.logic.utils.rep_noise import gen_report_de_noise
from app.logic.utils.token import gen_tokens
from app.models import Metric
from app.models import ProjectInfo


def pre_train(work_dir, project_path):
    """Run the full feature-extraction pipeline for one project.

    Executes, in order: preprocessing, token generation, AST generation,
    PDG generation, CSG-vector generation, and defect-report de-noising,
    printing a progress marker after each stage.

    Returns:
        (features, attn_weights): the de-noised report features and the
        CSG attention weights produced by the pipeline.
    """

    def _stage_done(msg):
        # Progress marker printed between pipeline stages.
        print(msg)
        print("----------------------------")

    # Preprocessing
    gen_preprocessor(work_dir, project_path)
    _stage_done("预处理完成......")

    # Token generation
    gen_tokens(work_dir, project_path)
    _stage_done("生成Token完成......")

    # AST generation
    gen_ast(work_dir, project_path)
    _stage_done("生成AST完成...")

    # PDG generation
    gen_pdg(work_dir, project_path)
    _stage_done("生成PDG完成...")

    # CSG vectors (per-file output plus attention weights)
    output, attn_weights = gen_csg(work_dir, project_path)
    _stage_done("生成csg向量完成...")

    # Defect-report de-noising
    features = gen_report_de_noise(work_dir, project_path)
    _stage_done("缺陷报告降噪完成...")

    # Shape diagnostics only — full tensors are too noisy to print.
    print("Output Shape:", output.shape)
    print("Attention Weights Shape:", attn_weights.shape)
    print("features:", features.shape)

    return features, attn_weights


class MLP(nn.Module):
    """Two-layer perceptron: Linear -> ReLU -> Linear.

    The final layer emits raw logits (no activation), matching the
    BCEWithLogitsLoss used during training. Attribute names fc1/fc2/relu
    are kept stable so previously saved state_dicts remain loadable.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)   # input -> hidden
        self.fc2 = nn.Linear(hidden_dim, output_dim)  # hidden -> logits
        self.relu = nn.ReLU()

    def forward(self, x):
        """Return raw logits for the input batch x."""
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)


def train(work_dir, project_path, num_epochs=100, k=10):
    """Train the file-level defect-prediction MLP and rank files.

    Runs the feature pipeline, trains an MLP on the concatenated
    (per-file, report) features, saves the model, and returns the
    indices and sigmoid scores of the k most suspicious files.

    Args:
        work_dir: root working directory containing project data.
        project_path: project identifier / sub-directory name.
        num_epochs: number of full-batch training epochs.
        k: number of top-ranked files to return.

    Returns:
        (topk_indices, topk_scores): LongTensor of file indices and
        FloatTensor of their suspiciousness scores, highest first.
    """
    # 1. Build input features: per-file matrix s (m x n) and the single
    #    report vector z (1 x p) from the preprocessing pipeline.
    s, z = pre_train(work_dir, project_path)
    m = s.shape[0]
    n = s.shape[1]
    p = z.shape[1]

    print("m:", m, "n:", n, "p:", p)

    # Replicate the report vector onto every file row, then concatenate
    # along the feature dimension: m x (n + p).
    z_repeated = z.repeat(m, 1)
    features = torch.cat((s, z_repeated), dim=1)

    # 2. Model, loss, and optimizer.
    input_dim = n + p
    hidden_dim = 64  # hidden width; tune as needed
    output_dim = 1   # single logit: "is this file defective?"

    model = MLP(input_dim, hidden_dim, output_dim)
    criterion = nn.BCEWithLogitsLoss()  # binary classification on logits
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # 3. Training labels (0/1 per file).
    # NOTE(review): currently random placeholders — see mark_files_in_patch.
    y_train = mark_files_in_patch(work_dir, project_path, m)

    for epoch in range(num_epochs):
        model.train()

        # Forward pass over the full batch.
        outputs = model(features)
        loss = criterion(outputs, y_train)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        # retain_graph=True: `features` may carry autograd history from
        # gen_csg, and that shared graph must survive across epochs.
        loss.backward(retain_graph=True)
        optimizer.step()

        if (epoch + 1) % 10 == 0:
            print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}")

    # 4. Save the model.
    # BUGFIX: the old path joined "/file_model.pth" — the leading slash
    # made os.path.join discard every earlier component and target the
    # filesystem root. Also ensure the model directory exists.
    model_dir = os.path.join(work_dir, project_path, FILE_MODEL_DIR)
    os.makedirs(model_dir, exist_ok=True)
    save_path = os.path.join(model_dir, "file_model.pth")
    print(f"File train model saved to: {save_path}")
    torch.save(model.state_dict(), str(save_path))

    # 5. Final prediction and ranking.
    model.eval()
    with torch.no_grad():
        predictions = model(features)
        # Sigmoid turns logits into per-file suspiciousness probabilities.
        scores = torch.sigmoid(predictions).squeeze()

        # Never ask topk for more entries than there are files.
        top_n = min(k, scores.numel())
        _, topk_indices = torch.topk(scores, top_n)
        topk_scores = scores[topk_indices]

    print(f"Top {k} predicted defective files and their suspicious scores:")
    for idx, score in zip(topk_indices, topk_scores):
        print(f"File {idx.item()}: Suspicious Score = {score.item():.4f}")

    calculate_score(scores, y_train, project_path, k)

    return topk_indices, topk_scores


def top_k_hit_rate(scores, labels, k):
    """Fraction of the k highest-scored files that are truly defective.

    scores: predicted suspiciousness per file (Tensor, shape=[m])
    labels: ground-truth 0/1 labels (Tensor, shape=[m])
    k: ranking cutoff

    Returns the hit rate as a Python float in [0, 1].
    """
    # Indices of the k top-scored files, then their true labels.
    top_indices = torch.topk(scores, k).indices
    hits = labels[top_indices].sum().item()
    return hits / k


def mean_reciprocal_rank(scores, labels):
    """Average reciprocal rank of the defective files.

    scores: predicted suspiciousness per file (Tensor, shape=[m])
    labels: ground-truth 0/1 labels (Tensor, shape=[m])

    NOTE(review): this averages 1/rank over *every* defective file, not
    just the first hit as textbook MRR does — kept as-is to preserve the
    metric values recorded in the database. Returns NaN when there is no
    defective file at all.
    """
    # Ranking induced by descending score.
    order = torch.argsort(scores, descending=True)
    ranked_labels = labels[order]
    # 1-based ranks of every defective file in that ordering.
    positions = torch.nonzero(ranked_labels == 1, as_tuple=True)[0] + 1
    return (1.0 / positions.float()).mean().item()


def mean_average_precision(scores, labels, k):
    """Average precision of the top-k ranked files.

    scores: predicted suspiciousness per file (Tensor, shape=[m])
    labels: ground-truth 0/1 labels (Tensor, shape=[m])
    k: ranking cutoff

    BUGFIX: the previous version averaged precision@i over *every*
    cutoff i = 1..k, which is not average precision. AP accumulates
    precision only at the ranks where a defective file actually appears,
    normalized by the number of defective files found in the top k.

    Returns 0.0 when no defective file appears in the top k.
    """
    _, sorted_indices = torch.sort(scores, descending=True)  # rank by score
    relevant = (labels[sorted_indices] == 1)[:k]  # relevance of top-k slots

    hits = 0
    precision_sum = 0.0
    for rank, is_relevant in enumerate(relevant.tolist(), start=1):
        if is_relevant:
            hits += 1
            precision_sum += hits / rank  # precision at this relevant rank
    return precision_sum / hits if hits else 0.0


def calculate_score(scores, y_train, project_path, k=10):
    """Compute ranking metrics and persist them as a Metric row.

    Computes Top-1/5/10 hit rates, MRR, and MAP@k for the given scores
    against the training labels, prints each one, and writes a single
    "file"-type Metric record for the project.

    Args:
        scores: predicted suspiciousness per file (Tensor, shape=[m]).
        y_train: ground-truth 0/1 labels (Tensor).
        project_path: project id used to look up ProjectInfo.
        k: cutoff for MAP.

    Raises:
        ValueError: if no ProjectInfo row exists for project_path.
    """
    # Top-K hit rates at the standard cutoffs. The metric helpers always
    # return plain floats, so no None-coalescing is needed (the old
    # `0.001 if ... is None` guards were dead code).
    hit_rates = {}
    for cutoff in (1, 5, 10):
        hit_rates[cutoff] = top_k_hit_rate(scores, y_train, cutoff)
        print(f"Top-{cutoff} Hit Rate: {hit_rates[cutoff]:.4f}")

    # MRR
    mrr_val = mean_reciprocal_rank(scores, y_train)
    print(f"Mean Reciprocal Rank (MRR): {mrr_val:.4f}")

    # MAP over the top k files
    map_val = mean_average_precision(scores, y_train, k)
    print(f"Mean Average Precision (MAP): {map_val:.4f}")

    # Fail with a clear message instead of AttributeError on a missing row.
    project = ProjectInfo.query.filter_by(id=project_path).first()
    if project is None:
        raise ValueError(f"No ProjectInfo row found for id={project_path}")

    # Persist the metrics.
    db.session.add(
        Metric(
            project_name=project.project_name,
            met_type="file",
            top1=float(hit_rates[1]),
            top5=float(hit_rates[5]),
            top10=float(hit_rates[10]),
            mrr_score=float(mrr_val),
            map_score=float(map_val),
        )
    )
    db.session.commit()


def get_java_files_recursive(directory, file_list=None, filepath_list=None):
    """Depth-first scan of `directory`, collecting every *.java file.

    Appends bare file names to file_list and their full paths to
    filepath_list; the two lists stay index-aligned. Fresh lists are
    created when the caller passes None. Mutates the lists in place and
    returns None. Traversal follows os.listdir order, recursing into a
    subdirectory at the point it is encountered.
    """
    file_list = [] if file_list is None else file_list
    filepath_list = [] if filepath_list is None else filepath_list

    for name in os.listdir(directory):
        path = os.path.join(directory, name)
        if os.path.isdir(path):
            # Recurse, sharing the same accumulator lists.
            get_java_files_recursive(path, file_list, filepath_list)
        elif name.endswith(".java") and os.path.isfile(path):
            file_list.append(name)      # bare file name only
            filepath_list.append(path)  # full path


def mark_files_in_patch(work_dir, project_path, m):
    """Build the (m, 1) float tensor of training labels for the project.

    NOTE(review): the returned labels are currently *random* 0/1
    placeholders. The intended logic — mark a file 1 when it appears in
    the bug-fix patch references — is still commented out below.
    TODO: enable it once the alignment between file_list indices and the
    m feature rows is confirmed (they may differ in length).
    """
    file_list, filepath_list = [], []
    get_java_files_recursive(
        os.path.join(work_dir, project_path, "src"), file_list, filepath_list
    )
    # Files referenced by the fix patches (unused until the real
    # labelling below is enabled).
    patch_analysis = PatchAnalysis(work_dir, project_path)
    patch_files = patch_analysis.get_java_references()

    print("file_list:", file_list)
    # Placeholder: one random 0/1 label per feature row.
    y_train = torch.randint(0, 2, (m, 1)).float()
    # Intended labelling (disabled):
    # y_train = torch.zeros(m, 1).float()
    # for i in range(m):
    #     if file_list[i] in patch_files:
    #         y_train[i] = 1
    return y_train


def get_recommend_filelist(work_dir, project_path):
    """Train the file-level model and return its top-ranked files.

    Returns a list of dicts, one per recommended file, each carrying
    project_id, file_path, file_name, and the formatted score string.
    """
    topk_indices, topk_scores = train(work_dir, project_path)

    # Re-scan the source tree so indices map back to concrete files.
    file_list, filepath_list = [], []
    get_java_files_recursive(
        os.path.join(work_dir, project_path, "src"), file_list, filepath_list
    )

    return [
        {
            "project_id": project_path,
            "file_path": filepath_list[idx.item()],
            "file_name": file_list[idx.item()],
            "score": f"{score.item(): .4f}",
        }
        for idx, score in zip(topk_indices, topk_scores)
    ]


def file_train(work_dir, project_path):
    """Run file-level training, print the suspicious files, and return
    them as a list of result dicts (project_id, file_path, file_name,
    score)."""
    topk_indices, topk_scores = train(work_dir, project_path)

    # Map predicted indices back to concrete file names/paths.
    file_list, filepath_list = [], []
    get_java_files_recursive(
        os.path.join(work_dir, project_path, "src"), file_list, filepath_list
    )
    print("filepath_list:", len(filepath_list))
    print("file_list:", len(file_list))

    result = []
    for idx, score in zip(topk_indices, topk_scores):
        name = file_list[idx.item()]
        formatted = f"{score.item(): .4f}"
        print("可疑文件：", name, "可疑分数：", formatted)
        result.append(
            {
                "project_id": project_path,
                "file_path": filepath_list[idx.item()],
                "file_name": name,
                "score": formatted,
            }
        )
    print(result)
    return result


#
# work_dir = "../../../data"  # 请修改为实际路径
# project_path = "30"  # 请修改为实际项目路径
# topk_indices, topk_scores = train(work_dir, project_path)
# file_list = []
# filepath_list = []
# get_java_files_recursive(
#     os.path.join(work_dir, project_path, "src"), file_list, filepath_list
# )
# print("filepath_list:", len(filepath_list))
# print("file_list:", len(file_list))
# result = []
# for idx, score in zip(topk_indices, topk_scores):
#     print("可疑文件：", file_list[idx.item()], "可疑分数：", f"{score.item(): .4f}")
#     result.append(
#         [filepath_list[idx.item()], file_list[idx.item()], f"{score.item(): .4f}"]
#     )
# print(result)
