"""
计算SV 区域附近的单倍型, 并检查SV是否在block区域内
"""

import os
import re
import shutil
import subprocess
import sys
import traceback

import pandas as pd

SAMPLE_NAME = sys.argv[1]
FLANK=500000

SV_DATA_FILE_PATH = "./SV_data.test.txt"
sv_df = pd.read_csv(SV_DATA_FILE_PATH)

def exec_cmd(command):
    process = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
    )
    output, error = process.communicate()
    if process.returncode == 0:
        log={
            "type": "info",
            "context": f"{command} \n {output.decode()} \n {error.decode()} \n"
        }
    else:
        print(f"ERROR({process.returncode}): {command} \n {error.decode()} \n")
        log={
            "type": "error",
            "context": f"ERROR({process.returncode}): {command} \n {error.decode()} \n"
        }

    return log



def run_haploview(sv_id, chr_id, left_position, right_position):
    """
    执行计算单倍型的相关命令
    """
    commands = []
    sv_dir = f"./{sv_id}"
    os.makedirs(sv_dir, exist_ok=True)

    # step1: 提取数据
    commands.append(f"""
        bcftools view --regions {chr_id}:{left_position}-{right_position} \
        /public/home/yunlzhang/data/Jilin/3613/reference_panel/JL.3613.imputed.vcf.gz \
        -Oz -o {sv_dir}/SV.vcf.gz
    """)
    # step2: 格式化数据
    commands.append(f"""
        zcat {sv_dir}/SV.vcf.gz | awk 'BEGIN{{OFS="\\t"}} /^#/ {{print $0; next}} {{$3=$1"_"$2; print $0}}' | bgzip -f > {sv_dir}/SV.id.vcf.gz
    """)
    # step3: plink 转换数据
    commands.append(f"""
        plink --vcf {sv_dir}/SV.id.vcf.gz --allow-extra-chr --geno 0.5 --maf 0.05 --recode --out {sv_dir}/SV.plink
    """)

    # step4: 生成haploview入口文件
    commands.append(f"""
        sed -i 's/-9/0/g' {sv_dir}/SV.plink.ped && awk 'BEGIN {{OFS="\\t"}} {{print $2, $4}}' {sv_dir}/SV.plink.map > {sv_dir}/SV.plink.info
    """)

    # step5: 执行haploview
    commands.append(f"""
        java -Xmx10g -jar ~/software/Haploview.jar \
            -nogui \
            -pedfile {sv_dir}/SV.plink.ped \
            -info {sv_dir}/SV.plink.info \
            -minMAF 0.05 \
            -missingCutoff 0.5 \
            -maxDistance 500 \
            -blockoutput \
            -blockCutHighCI 0.8 \
            -pairwiseTagging \
            -out {sv_dir}/haploview
    """)

    format_commands = [re.sub(r"\s+", " ", command) for command in commands]

    for cmd in format_commands:
        exec_cmd(cmd)


def clear_haploview(sv_id):
    """
    清理haploview过程文件
    """
    if os.path.exists(f"./{sv_id}"):
        os.system(f"rm -rf ./{sv_id}")



def parse_block(sv_id, sv_start, sv_stop):
    """
    解析haploview的结果
    """
    block_file = f"./{sv_id}/haploview.GABRIELblocks"
    info_file = f"./{sv_id}/SV.plink.info"
    all_snp_df = pd.DataFrame()
    all_block_context = []

    include_sv_block=""

    if os.path.exists(block_file) and os.path.exists(info_file):
        info_df = pd.read_csv(info_file, header=None, sep="\t")
        block_count = 0
        with open(block_file, "r") as f:
            lines = f.readlines()
            block_context = []
            block_id = ""
            for line in lines:
                line_text = line.strip()
                if "BLOCK" in line_text:
                    block_count+=1
                    block_id = f"{sv_id}-{block_count}"
                    line_list = line_text.split(" ")
                    marker_index = line_list.index("MARKERS:")
                    snp_indexs = [int(v)-1 for v in line_list[marker_index+1:]]
                    item_snp_df = info_df.iloc[snp_indexs][[0]].copy()
                    item_snp_df[1] = block_id

                    # check if sv in block
                    block_range = [int(info_df.iloc[snp_indexs[0]][1]), int(info_df.iloc[snp_indexs[-1]][1])]
                    if block_range[0] <= sv_start and block_range[1] >= sv_stop:
                        include_sv_block = block_id

                    if len(all_snp_df) > 0:
                        all_snp_df = pd.concat([all_snp_df, item_snp_df])
                    else:
                        all_snp_df = item_snp_df

                    if len(block_context) > 0:
                        all_block_context += block_context
                        block_context = []
                else:
                    if len(block_context) == 0 and block_id !="":
                        block_context.append(f">{block_id}")
                        block_context.append(line_text)
                    else:
                        block_context.append(line_text)
            if len(block_context) > 0:
                all_block_context += block_context

    else:
        clear_haploview(sv_id)

    return all_snp_df, all_block_context, include_sv_block


def find_tag_snp(sv_id, sv_snp_df):
    """
    找出block中的Tag SNP
    """
    tag_file = f"./{sv_id}/haploview.TAGS"
    snp_tag_df = sv_snp_df.set_index(0)
    snp_tag_df["Tag"] = "N"

    skiprows = 0
    target_line_exists = False

    with open(tag_file, "r") as f:
        for line in f:
            if "Test	Alleles Captured" in line:
                target_line_exists = True
                break
            else:
                skiprows+=1

    # check if target line exists
    if target_line_exists:
        tag_df = pd.read_csv(tag_file, sep="\t", skiprows=skiprows)
        for index, row in tag_df.iterrows():
            if row["Test"] in snp_tag_df.index:
                if len(row["Alleles Captured"].split(",")) > 1:
                    snp_tag_df.at[row["Test"], 'Tag'] = "Y"

    snp_tag_df.reset_index(inplace=True)
    return snp_tag_df


def haploview():
    target_df = sv_df[(sv_df["check"] == True) & (sv_df["genome"] == SAMPLE_NAME)]

    block_snp_file_path = f"./{SAMPLE_NAME}_block_snp.txt"
    block_context_file_path = f"./{SAMPLE_NAME}_block_context.txt"
    sv_block_file_path = f"./{SAMPLE_NAME}_sv_block.txt"

    if os.path.exists(block_snp_file_path):
        os.remove(block_snp_file_path)
    if os.path.exists(block_context_file_path):
        os.remove(block_context_file_path)
    if os.path.exists(sv_block_file_path):
        os.remove(sv_block_file_path)

    for index, row in target_df.iterrows():
        chr_id = row["#reference"]
        start = row["ref_start"]
        stop = row["ref_stop"]
        left_position = start - FLANK if start > FLANK else 0
        right_position = stop + FLANK

        sv_id = f"{SAMPLE_NAME}-{chr_id}-{start}"

        # 计算单倍型
        run_haploview(sv_id, chr_id, left_position, right_position)

        # 解析block结果
        ld_snp_df, ld_block_context, include_sv_block = parse_block(sv_id, start, stop)

        # 找出Tag SNP
        snp_tag_df = find_tag_snp(sv_id, ld_snp_df)

        # 输出结果
        if len(snp_tag_df) > 0:
            snp_tag_df.to_csv(block_snp_file_path, index=False, header=False, sep="\t", mode="a")
        if len(ld_block_context) > 0:
            with open(block_context_file_path, "a+") as f:
                for line in ld_block_context:
                    f.write(f"{line}\n")

        if include_sv_block:
            with open(sv_block_file_path, "a+") as f:
                f.write(f"{include_sv_block}\n")

        # 清理haploview过程文件
        clear_haploview(sv_id)

if __name__ == "__main__":
    try:
        haploview()
    except Exception as e:
        print(e)
