# coding:utf-8
# utils/file_utils.py

import os
import shutil
import pandas as pd

from utils.network_fusion import compute_combined_weight


def read_edge_list(file_path, delimiter='\t', header=None, weighted=False):
    """
    Read an edge-list file and return it as a list of tuples.

    :param file_path: path of the edge file
    :param delimiter: column separator
    :param header: header row spec forwarded to ``pandas.read_csv``
    :param weighted: if True and a third column exists, include it as a float weight
    :return: edge list ``[(u, v, w?), ...]`` with node ids as strings
    :raises ValueError: if the file cannot be read/parsed
    """
    try:
        df = pd.read_csv(file_path, sep=delimiter, header=header, engine='python')
        edges = []
        # itertuples gives positional access, which is correct regardless of
        # whether the file has a header row (row[0] label-lookup on a Series
        # only works when header=None makes the column labels 0, 1, 2, ...).
        for row in df.itertuples(index=False):
            u, v = str(row[0]), str(row[1])
            if weighted and len(row) > 2:
                edges.append((u, v, float(row[2])))
            else:
                edges.append((u, v))
        return edges
    except Exception as e:
        # Chain the original exception so the root cause stays visible.
        raise ValueError(f"读取边文件失败：{e}") from e


def write_edge_list(edges, file_path, weighted=True):
    """
    Write an edge list to a tab-separated file.

    NOTE(review): this function is redefined later in this module, so this
    definition is shadowed at import time — consider removing one copy.

    :param edges: edge list ``[(u, v, w?), ...]``
    :param file_path: output file path
    :param weighted: if True, write the weight column for 3-tuples
    """
    # os.makedirs('') raises FileNotFoundError, so only create the
    # directory when the path actually has a directory component.
    parent = os.path.dirname(file_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(file_path, 'w') as f:
        for edge in edges:
            if weighted and len(edge) == 3:
                f.write(f"{edge[0]}\t{edge[1]}\t{edge[2]:.4f}\n")
            elif len(edge) >= 2:
                f.write(f"{edge[0]}\t{edge[1]}\n")


def copy_files(src_dir, dst_dir):
    """
    Copy every regular file from src_dir into dst_dir (non-recursive).

    :param src_dir: source directory
    :param dst_dir: destination directory (created if missing)
    """
    os.makedirs(dst_dir, exist_ok=True)
    for filename in os.listdir(src_dir):
        src_path = os.path.join(src_dir, filename)
        # listdir also yields subdirectories; shutil.copy raises
        # IsADirectoryError on them, so copy only regular files.
        if os.path.isfile(src_path):
            shutil.copy(src_path, os.path.join(dst_dir, filename))


def ensure_dir(directory):
    """
    Ensure the given directory exists, creating intermediate levels as needed.

    exist_ok=True makes this atomic and idempotent, avoiding the
    check-then-create race of the exists()/makedirs() pattern.
    """
    os.makedirs(directory, exist_ok=True)


def clean_filename(filename):
    """
    Remove characters outside a safe whitelist (ASCII letters, digits,
    and ``-_.() `` plus the space) from a filename.

    :return: the cleaned name, or "unnamed" if nothing survives
    """
    allowed = frozenset(
        "-_.() "
        "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
        "0123456789"
    )
    cleaned = ''.join(filter(allowed.__contains__, filename))
    return cleaned if cleaned else "unnamed"


def get_all_entrez_ids(network_file):
    """
    Collect the unique Entrez IDs appearing in the first two columns of a
    network edge file. Lines starting with '#' are treated as comments.

    :param network_file: path of the network file
    :return: sorted list of int IDs
    """
    unique_ids = set()
    with open(network_file, 'r') as handle:
        for raw_line in handle:
            if raw_line.startswith('#'):
                continue
            fields = raw_line.split()
            if len(fields) < 2:
                continue
            unique_ids.add(int(fields[0]))
            unique_ids.add(int(fields[1]))
    return sorted(unique_ids)


def merge_networks(ppi_file, expr_file, output_file, alpha=0.5):
    """
    Fuse a PPI network and a gene-expression network edge-by-edge and
    write the combined network to ``output_file``.

    Edges present in only one layer get weight 0 in the missing layer;
    each fused weight comes from ``compute_combined_weight``.
    """
    edge_weights = {}

    def _accumulate(edges, channel):
        # Edges are undirected: canonicalise (u, v) ordering so that both
        # directions collapse onto the same key.
        for u, v, w in edges:
            key = tuple(sorted((u, v)))
            edge_weights.setdefault(key, {'ppi': 0, 'expr': 0})[channel] = w

    _accumulate(read_edge_list(ppi_file, weighted=True), 'ppi')
    _accumulate(read_edge_list(expr_file, weighted=True), 'expr')

    fused_edges = [
        (u, v, compute_combined_weight(float(w['ppi']), float(w['expr']), alpha))
        for (u, v), w in edge_weights.items()
    ]

    write_edge_list(fused_edges, output_file, weighted=True)


def load_interlayer_mapping(mapping_file="data/processed/layer_mapping.txt"):
    """
    Load the inter-layer mapping used for cross-layer expansion in the
    LAMA community-detection algorithm.

    Lines are ``protein_id gene_id [score]``; '#' lines are comments.

    :param mapping_file: mapping file path
    :return: dict ``{protein_id: (gene_id, score)}`` — score defaults to
        1.0 when the line has no third column
    """
    mapping = {}
    with open(mapping_file, 'r') as handle:
        for raw in handle:
            if raw.startswith('#'):
                continue
            fields = raw.split()
            if len(fields) < 2:
                continue
            confidence = float(fields[2]) if len(fields) == 3 else 1.0
            mapping[fields[0]] = (fields[1], confidence)
    return mapping


def map_protein_to_expr(ppi_file, mapping_dict):
    """
    Translate PPI-network node ids into their expression-network
    counterparts via the protein→gene mapping.

    :param ppi_file: input PPI edge file (read as weighted)
    :param mapping_dict: dict protein_id -> (gene_id, score)
    :return: mapped edge list ``[(gene_u, gene_v, weight)]``; edges with
        any unmapped endpoint are dropped
    """
    result = []
    for u, v, w in read_edge_list(ppi_file, weighted=True):
        entry_u = mapping_dict.get(u)
        entry_v = mapping_dict.get(v)
        # A missing key yields None (falsy), so the edge is skipped
        # unless both endpoints have a mapping entry.
        if entry_u and entry_v:
            result.append((entry_u[0], entry_v[0], w))  # keep only the gene id
    return result


def generate_layer_mapping_report(mapping_dict, report_file="analysis/reports/layer_mapping_report.txt"):
    """
    Write a report summarising how many proteins were successfully mapped
    to a gene, plus a per-entry detail listing.

    :param mapping_dict: dict ``{protein_id: (gene_id, score)}``; a falsy
        gene_id (e.g. None) counts as a failed mapping
    :param report_file: output path; parent directories are created
    """
    total = len(mapping_dict)
    successful = sum(1 for v in mapping_dict.values() if v[0])
    failed = total - successful

    # The other writers in this module create their output directory;
    # do the same so the default "analysis/reports" path works on a
    # fresh checkout. Guard against an empty dirname for bare filenames.
    parent = os.path.dirname(report_file)
    if parent:
        os.makedirs(parent, exist_ok=True)

    # Explicit UTF-8: the report contains non-ASCII text ('→' and Chinese
    # labels), which raises UnicodeEncodeError under a non-UTF-8 locale.
    with open(report_file, 'w', encoding='utf-8') as f:
        f.write(f"总映射数: {total}\n")
        f.write(f"成功映射: {successful}\n")
        f.write(f"失败映射: {failed}\n")
        f.write("\n详细映射信息:\n")
        for k, v in mapping_dict.items():
            f.write(f"{k} → {v[0]} | Score: {v[1]}\n")


def write_edge_list(edges, file_path, weighted=True):
    """
    Write an edge list to a tab-separated file.

    NOTE(review): duplicate definition — this shadows the earlier
    write_edge_list in this module; consider removing one copy.

    :param edges: edge list ``[(u, v, w?)]``
    :param file_path: output file path
    :param weighted: if True, include the weight column for 3-tuples
    """
    # os.makedirs('') raises FileNotFoundError; only create a directory
    # when the path actually has a directory component.
    parent = os.path.dirname(file_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(file_path, 'w') as f:
        for edge in edges:
            if weighted and len(edge) == 3:
                u, v, w = edge
                f.write(f"{u}\t{v}\t{w:.4f}\n")
            else:
                # The unweighted path may still receive (u, v, w) tuples;
                # strict `u, v = edge` unpacking crashed on those, so take
                # the first two fields explicitly.
                u, v = edge[0], edge[1]
                f.write(f"{u}\t{v}\n")