"""
计算单倍型，将输出文件进行合并，减少文件个数
服务器有文件个数限制
"""

import math
import multiprocessing
import os
import re
import shutil
import sys
import traceback

import pandas as pd

SAMPLE_NAME = sys.argv[1]  # genome/sample identifier, first CLI argument
FLANK=500000  # bp of flanking sequence added on each side of an SV region

# Root of the pan-genome working area; per-sample output lives under it.
HOME="/public/home/yunlzhang/data/pan-genome"
SV_DATA_FILE_PATH = "./SV_data.txt"
# NOTE(review): pd.read_csv defaults to comma-separated; confirm SV_data.txt
# is really comma-delimited despite the .txt extension.
sv_df = pd.read_csv(SV_DATA_FILE_PATH)


def generate_cmd(sv_id, chr_id, left_position, right_position):
    """
    Build the shell pipeline that computes haplotype blocks for one SV.

    Parameters:
        sv_id: identifier used for the per-SV working directory.
        chr_id: chromosome/contig name passed to bcftools --regions.
        left_position, right_position: region boundaries (SV +/- FLANK).

    Returns:
        One shell command string: each step collapsed to a single line and
        chained with "&&" so a failing step aborts the remaining ones.
    """
    commands = []
    sv_dir = f"{HOME}/{SAMPLE_NAME}/haploview/{sv_id}"
    commands.append(f"mkdir -p {sv_dir}")
    # step1: extract the region's genotypes from the imputed reference panel
    commands.append(f"""
        bcftools view --regions {chr_id}:{left_position}-{right_position} \
        /public/home/yunlzhang/data/Jilin/3613/reference_panel/JL.3613.imputed.vcf.gz \
        -Oz -o {sv_dir}/SV.vcf.gz
    """)
    # step2: normalise the data — give every record a CHROM_POS variant ID
    commands.append(f"""
        zcat {sv_dir}/SV.vcf.gz | awk 'BEGIN{{OFS="\\t"}} /^#/ {{print $0; next}} {{$3=$1"_"$2; print $0}}' | bgzip -f > {sv_dir}/SV.id.vcf.gz
    """)
    # step3: convert to plink ped/map format (with genotyping-rate/MAF filters)
    commands.append(f"""
        plink --vcf {sv_dir}/SV.id.vcf.gz --allow-extra-chr --geno 0.5 --maf 0.05 --recode --out {sv_dir}/SV.plink
    """)

    # step4: build the Haploview input files (ped with -9 recoded to 0, plus
    # a two-column marker info file from the map)
    commands.append(f"""
        sed -i 's/-9/0/g' {sv_dir}/SV.plink.ped && awk 'BEGIN {{OFS="\\t"}} {{print $2, $4}}' {sv_dir}/SV.plink.map > {sv_dir}/SV.plink.info
    """)

    # step5: run Haploview headless to emit block and tagging output
    commands.append(f"""
        java -jar ~/software/Haploview.jar \
            -nogui \
            -pedfile {sv_dir}/SV.plink.ped \
            -info {sv_dir}/SV.plink.info \
            -minMAF 0.05 \
            -missingCutoff 0.5 \
            -maxDistance 500 \
            -blockoutput \
            -blockCutHighCI 0.8 \
            -pairwiseTagging \
            -out {sv_dir}/haploview
    """)

    # Collapse each multi-line command onto one line before chaining them.
    format_command = "&&".join([re.sub(r"\s+", " ", command) for command in commands])
    return format_command


def write_file(fp, context):
    """Append every element of *context* to file *fp*, one entry per line."""
    with open(fp, "a+") as out:
        out.writelines(f"{entry}\n" for entry in context)


def write_results(results):
    """Pool callback: append one worker's merged output to the summary files."""
    targets = (
        ("BLOCK.txt", "block_context"),
        ("TAG.txt", "tag_context"),
        ("SNP.txt", "snp_context"),
    )
    for filename, key in targets:
        write_file(f"{HOME}/{SAMPLE_NAME}/{filename}", results[key])


def read_file(fp):
    """Read *fp* and return its lines with surrounding whitespace stripped."""
    with open(fp, "r") as src:
        return [raw.strip() for raw in src]


def process(command_set):
    """
    Worker: run the haploview pipeline for a batch of SVs and collect output.

    Parameters:
        command_set: list of dicts with keys "cmd" (shell pipeline string),
            "sv_id" and "sv_dir", as built by haploview().

    Returns:
        Dict with keys "block_context", "tag_context", "snp_context": the
        concatenated file contents, each SV's section prefixed with ">{sv_id}".
    """
    process_data = {
        "block_context": [],
        "tag_context": [],
        "snp_context": []
    }
    for item in command_set:
        sv_id = item["sv_id"]
        sv_dir = item["sv_dir"]

        # Skip re-running the expensive pipeline when its final output already
        # exists, so an interrupted run can resume. The exit status is
        # deliberately ignored: missing outputs are simply skipped below.
        if not os.path.exists(f"{sv_dir}/haploview.TAGS"):
            os.system(item["cmd"])

        if os.path.exists(f"{sv_dir}/haploview.GABRIELblocks"):
            block_context = read_file(f"{sv_dir}/haploview.GABRIELblocks")
            block_context.insert(0, f">{sv_id}")
            process_data["block_context"] += block_context

            # NOTE(review): TAG and SNP collection only happens when the
            # GABRIELblocks file exists — confirm this nesting is intended.
            if os.path.exists(f"{sv_dir}/haploview.TAGS"):
                tag_context = read_file(f"{sv_dir}/haploview.TAGS")
                tag_context.insert(0, f">{sv_id}")
                process_data["tag_context"] += tag_context

            if os.path.exists(f"{sv_dir}/SV.plink.info"):
                snp_context = read_file(f"{sv_dir}/SV.plink.info")
                snp_context.insert(0, f">{sv_id}")
                process_data["snp_context"] += snp_context

        # Remove the per-SV working directory to stay under the server's
        # file-count quota. shutil.rmtree replaces the former
        # os.system(f"rm -rf {sv_dir}") — no shell, no interpolated path.
        shutil.rmtree(sv_dir, ignore_errors=True)

    return process_data


def haploview():
    """
    Entry point: select the SVs for SAMPLE_NAME, fan the work out over a
    process pool, and merge per-worker results into BLOCK/TAG/SNP files.
    """
    target_df = sv_df[(sv_df["check"] == True) & (sv_df["genome"] == SAMPLE_NAME)].copy()
    target_df.reset_index(inplace=True)

    # Remove stale summary files first: write_file() appends, so leftovers
    # from a previous run would be duplicated into the new output.
    for summary in ("BLOCK.txt", "TAG.txt", "SNP.txt"):
        summary_path = f"{HOME}/{SAMPLE_NAME}/{summary}"
        if os.path.exists(summary_path):
            os.remove(summary_path)

    # Keep one core free for the parent; max(1, ...) avoids Pool(0), which
    # raises on a single-core host.
    cores = max(1, multiprocessing.cpu_count() - 1)
    pools = multiprocessing.Pool(cores)
    chunk_size = math.ceil(len(target_df) / cores)
    for i in range(cores):
        core_df = target_df.iloc[i * chunk_size:(i + 1) * chunk_size]
        cmds = []
        for _, row in core_df.iterrows():
            chr_id = row["#reference"]
            start = row["ref_start"]
            stop = row["ref_stop"]
            # Pad the SV by FLANK bp on each side, clamped at position 0.
            left_position = start - FLANK if start > FLANK else 0
            right_position = stop + FLANK

            sv_id = f"{chr_id}-{start}"

            cmds.append({
                "cmd": generate_cmd(sv_id, chr_id, left_position, right_position),
                "sv_id": sv_id,
                "sv_dir": f"{HOME}/{SAMPLE_NAME}/haploview/{sv_id}"
            })

        pools.apply_async(
            func=process,
            args=(cmds,),
            callback=write_results,
            # Surface worker exceptions instead of silently dropping them.
            error_callback=print
        )
    pools.close()
    pools.join()


if __name__ == "__main__":
    try:
        haploview()
    except Exception:
        # Print the full traceback (print(e) discarded it) and exit non-zero
        # so job schedulers / shells can detect the failure.
        traceback.print_exc()
        sys.exit(1)
