"""
2022.10.17 Liukb
    AnnotSV 对 SV 的 vcf 进行注释，过滤
"""

import re
import os
import sys
import attr
import logging
import argparse
import subprocess as sp
from collections import defaultdict
from SV_db_tools import SV_DB
from SV_vcf_tools import VCF


# Root-logger setup: DEBUG level, timestamped records carrying the module
# name and line number of the call site.
LOG_FORMAT = "[%(asctime)s - %(levelname)-4s - %(module)s : line %(lineno)s] %(message)s"
LOG_DATEFMT = "%Y-%m-%d %H:%M:%S"
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT, datefmt=LOG_DATEFMT)

@attr.s
class AnnotSV:
    """Annotate an SV VCF with AnnotSV, attach in-house population
    frequencies and a heuristic confidence score, then write a full and a
    filtered TSV report.

    Attributes (all plain strings, filled by the attrs-generated __init__):
        vcf:         input SV VCF path.
        hpo:         comma-separated HPO terms; may be empty.
        sample:      sample name; must match the VCF genotype column header.
        run_dir:     working directory for intermediate files and logs.
        out_file:    path of the filtered result TSV.
        bcftools:    bcftools executable path (passed to AnnotSV).
        AnnotSV_dir: AnnotSV installation root (exported as $ANNOTSV).
        sv_db_file:  in-house SV frequency database path.
    """
    vcf = attr.ib(type=str, default=attr.Factory(str))
    hpo = attr.ib(type=str, default=attr.Factory(str))
    sample = attr.ib(type=str, default=attr.Factory(str))
    run_dir = attr.ib(type=str, default=attr.Factory(str))
    out_file = attr.ib(type=str, default=attr.Factory(str))
    bcftools = attr.ib(type=str, default=attr.Factory(str))
    AnnotSV_dir = attr.ib(type=str, default=attr.Factory(str))
    sv_db_file = attr.ib(type=str, default=attr.Factory(str))

    def __attrs_post_init__(self):
        # Raw AnnotSV output lives next to the other run artifacts.
        self.anno_out = os.path.join(self.run_dir, "AnnotSV_origin_anno.tsv")
        if not self.AnnotSV_dir:
            self.AnnotSV_dir = "/share/apps/AnnotSV/AnnotSV-master"

    def run(self, overlap: int):
        """Run the AnnotSV annotation command via a shell.

        Builds the AnnotSV command line, runs it (stdout/stderr go to
        AnnotSV.log in run_dir) and, on success, saves the exact command to
        <sample>_AnnotSV.sh for reproducibility.

        Args:
            overlap: minimum reciprocal overlap (%) for AnnotSV's -overlap.
        """
        # FIX: build the option in a local variable instead of mutating
        # self.hpo — the original re-wrapped the attribute on every call,
        # so a second run() produced a nested, broken -hpo argument.
        hpo_opt = f'-hpo "{self.hpo}"' if self.hpo else ''
        # -annotationMode full \\
        log_file = os.path.join(self.run_dir, "AnnotSV.log")
        cmd = f'''
        export ANNOTSV={self.AnnotSV_dir}

        $ANNOTSV/bin/AnnotSV \\
            -genomeBuild GRCh37 \\
            {hpo_opt} -overlap {overlap} \\
            -includeCI 0 \\
            -outputFile {self.anno_out} \\
            -bcftools {self.bcftools} \\
            -SvinputFile {self.vcf} >{log_file} 2>&1
        '''
        logging.info("运行 AnnotSV ...")
        sp.run(cmd, shell=True)
        if not os.path.isfile(self.anno_out):
            logging.error(f'分析出错, 核查: \n{log_file}')
            return
        logging.info(f"AnnotSV 结果保存在： {self.anno_out}")
        run_sh = os.path.join(self.run_dir, f"{self.sample}_AnnotSV.sh")
        with open(run_sh, 'w') as f:
            f.write(cmd)

    def get_freq(self) -> dict:
        """Query the in-house SV database for population frequencies.

        Walks the VCF records and the database records in parallel — both
        streams are assumed to be sorted by chromosome (in the order given
        by sv_vcf.records.allow_chrom) and then by position.

        Returns:
            dict mapping each VCF record ID to its frequency as a string
            (Count / sample_num rounded to 4 digits), or '.' when the SV is
            not present in the database.
        """
        if not os.path.isfile(self.vcf):
            # FIX: include the offending path in the message (was bare "文件不存在").
            logging.error(f"文件不存在: {self.vcf}")
            sys.exit(1)
        logging.info(f'查询 sv 人群频率 {self.sv_db_file}')
        sv_db = SV_DB(self.sv_db_file)
        sv_vcf = VCF(f'{self.sample}:{self.vcf}')
        freq_dt = dict()
        chrom_order = sv_vcf.records.allow_chrom.index
        db_generator = sv_db.Iter()
        db_rec = next(db_generator, None)
        for vcf_rec in sv_vcf.Iter():
            if not db_rec:
                # Database exhausted: every remaining SV is unseen.
                freq_dt[vcf_rec.ID] = '.'
                continue
            # Database stream is behind on chromosome order: advance it.
            while db_rec and chrom_order(vcf_rec.CHROM) > chrom_order(db_rec.Chr1):
                db_rec = next(db_generator, None)
            # FIX: also guard db_rec here — the loop above can exhaust the
            # generator, and the original then crashed on db_rec.Chr1.
            if not db_rec or chrom_order(vcf_rec.CHROM) < chrom_order(db_rec.Chr1):
                freq_dt[vcf_rec.ID] = '.'
                continue
            # Same chromosome: advance the database while its position is smaller.
            while db_rec and vcf_rec.CHROM == db_rec.Chr1 and int(db_rec.Pos1) < int(vcf_rec.POS):
                db_rec = next(db_generator, None)
            if db_rec and vcf_rec.CHROM == db_rec.Chr1 and int(db_rec.Pos1) > int(vcf_rec.POS):
                freq_dt[vcf_rec.ID] = '.'
                continue
            # Positions match: scan records at this position for one with the
            # same SV type and matching mate/end coordinates.
            while db_rec and vcf_rec.CHROM == db_rec.Chr1 and int(db_rec.Pos1) == int(vcf_rec.POS):
                if db_rec.Type != vcf_rec.INFO['SVTYPE']:
                    db_rec = next(db_generator, None)
                    continue
                if vcf_rec.INFO['SVTYPE'] == 'BND':
                    # Mate breakend coordinates are embedded in the ALT field,
                    # e.g. A[chr2:12345[ — pull out chromosome and position.
                    Chr2, Pos2 = re.findall(r'[\[\]](\w+):(\d+)', vcf_rec.ALT)[0]
                    if db_rec.Chr2 == Chr2 and db_rec.Pos2 == Pos2:
                        freq_dt[vcf_rec.ID] = str(round(db_rec.Count / sv_db.sample_num, 4))
                        break
                else:
                    if db_rec.Chr2 == vcf_rec.CHROM and db_rec.Pos2 == vcf_rec.INFO['END']:
                        freq_dt[vcf_rec.ID] = str(round(db_rec.Count / sv_db.sample_num, 4))
                        break
                db_rec = next(db_generator, None)
            if vcf_rec.ID not in freq_dt:
                freq_dt[vcf_rec.ID] = '.'
        logging.info(f'共 {len(freq_dt)} 个 SV 进行查询.')
        return freq_dt

    def format_res(self):
        """Format the AnnotSV result into a full and a filtered TSV.

        Writes <out_file>.full.tsv with every annotated record and out_file
        with only the records accepted by _pass(). Columns follow
        target_header, restricted to those present in the AnnotSV output.
        """
        target_header = [
            'confidence_score', 'ACMG_class', 'Annotation_mode', 'SV_chrom', 
            'SV_start', 'SV_end', 'SV_length', 'SV_type', 'ALT', 'GT', 'QUAL', 
            'FILTER', 'CIPOS', 'CIEND', 'IMPRECISE', 'SVTOOL', 'NUM_SVTOOLS', 
            'PE', 'SR', 'BND_DEPTH', 'MATE_BND_DEPTH', 'Inter_freq', 'CytoBand', 
            'Gene_name', 'Gene_count', 'Tx', 'Tx_start', 'Tx_end', 'Frameshift', 
            'Intersect_start', 'Intersect_end', 'P_gain_phen', 
            'P_gain_hpo', 'P_gain_source', 'P_gain_coord', 'P_loss_phen', 
            'P_loss_hpo', 'P_loss_source', 'P_loss_coord', 'P_ins_phen', 
            'P_ins_hpo', 'P_ins_source', 'P_ins_coord', 'P_snvindel_nb', 
            'P_snvindel_phen', 'B_gain_source', 'B_gain_coord', 'B_gain_AFmax', 
            'B_loss_source', 'B_loss_coord', 'B_loss_AFmax', 'B_ins_source', 
            'B_ins_coord', 'B_ins_AFmax', 'B_inv_source', 'B_inv_coord', 
            'B_inv_AFmax', 'GC_content_left', 'GC_content_right', 'HI', 
            'TS', 'DDD_HI_percent', 'ExAC_delZ', 'ExAC_dupZ', 'ExAC_cnvZ', 
            'ExAC_synZ', 'ExAC_misZ', 'GnomAD_pLI', 'ExAC_pLI', 
            'GenCC_disease', 'GenCC_moi', 'GenCC_classification', 'GenCC_pmid', 
            'Exomiser_gene_pheno_score', 'Human_pheno_evidence', 'Mouse_pheno_evidence',
            'Fish_pheno_evidence', 'OMIM_ID', 'OMIM_phenotype', 'OMIM_inheritance',
            'OMIM_morbid', 'OMIM_morbid_candidate' 
        ]
        out_header = []
        full_info = []
        filter_info = []
        if not os.path.isfile(self.anno_out):
            logging.error(f'文件不存在: {self.anno_out}')
            return
        logging.info("AnnotSV 结果过滤 ...")
        for dt in self._conver_2_dict():
            if not out_header:
                # Fix column order on the first record; keep only known columns.
                out_header = [k for k in target_header if k in dt.keys()]
            row = "\t".join(dt[k] or '.' for k in out_header)
            full_info.append(row)
            if self._pass(dt):
                filter_info.append(row)
        full_anno_file = f"{self.out_file}.full.tsv"
        with open(full_anno_file, 'w') as fo:
            fo.write("\t".join(out_header) + "\n")
            fo.write("\n".join(full_info))
        with open(self.out_file, 'w') as fo:
            fo.write("\t".join(out_header) + "\n")
            fo.write("\n".join(filter_info))
        logging.info(f"AnnotSV 完整结果: {full_anno_file}")
        logging.info(f"AnnotSV 过滤后的结果: {self.out_file}")

    def _conver_2_dict(self) -> defaultdict:
        """Generator yielding each AnnotSV result line as a defaultdict(str).

        The VCF INFO and FORMAT/sample fields are flattened into the dict,
        a confidence score is attached, GT and ACMG_class are rewritten to
        human-readable values, and the in-house frequency is added under
        'Inter_freq'. Assumes the header line (starting with 'AnnotSV_ID')
        precedes all data lines.
        """
        GT_dict = {
            './.': '.',
            '0/1': 'HET',
            '1/0': 'HET',
            '1/1': 'HOM',
            '0/0': '.',
            '': '.'
        }
        acmg_class = {
            '5': '致病性的',
            '4': '可能致病的',
            '3': '意义不明的',
            '2': '可能良性的',
            '1': '良性的',
        }
        freq_dt = self.get_freq()
        header = []
        with open(self.anno_out) as fi:
            for line in fi:
                if not line.strip():
                    continue
                if line.startswith('AnnotSV_ID'):
                    header = line.strip('\n').split('\t')
                    continue
                line_dict = defaultdict(str)
                line_dict.update(zip(header, line.strip('\n').split('\t')))
                # Flatten INFO. FIX: VCF Flag-type entries (e.g. a bare
                # "IMPRECISE") carry no '=', which made the original
                # two-way unpack raise ValueError; flags now map to 'yes'.
                for item in line_dict['INFO'].split(';'):
                    if not item:
                        continue
                    if '=' in item:
                        k, v = item.split('=', 1)
                    else:
                        k, v = item, 'yes'
                    if k == 'IMPRECISE':
                        v = 'yes'
                    line_dict[k] = v
                # Flatten the per-sample FORMAT fields (GT, PE, SR, ...).
                for k, v in zip(line_dict['FORMAT'].split(':'), line_dict[self.sample].split(':')):
                    line_dict[k] = v
                # Score before BND_DEPTH is normalized to '.' below — the
                # scorer needs the raw numeric string.
                line_dict['confidence_score'] = self._confidence_score(line_dict)
                # FIX: unknown genotype strings fall back to '.' instead of
                # raising KeyError.
                line_dict['GT'] = GT_dict.get(line_dict['GT'], '.')
                for k, v in acmg_class.items():
                    line_dict['ACMG_class'] = line_dict['ACMG_class'].replace(k, v)
                if not line_dict['BND_DEPTH']:
                    line_dict['BND_DEPTH'] = '.'
                if not line_dict['MATE_BND_DEPTH']:
                    line_dict['MATE_BND_DEPTH'] = '.'
                line_dict['Inter_freq'] = freq_dt.get(line_dict['ID'], '.')
                yield line_dict

    def _confidence_score(self, dt) -> str:
        """Heuristic confidence score for one SV record (returned as str).

            FILTER == PASS                              +1
            QUAL == 999                                 +1
            PE >= 10 or SR >= 10 (not both)             +1
            PE >= 10 and SR >= 10                       +4
            NUM_SVTOOLS > 1                             +4
            CIPOS span  {0:+5, 1-5:+4, 6-10:+3, 11-20:+2, 21-30:+1}
            CIEND span  same scale
            (PE+SR)*4 > BND_DEPTH and BND_DEPTH > 20    +4
            IMPRECISE                                   -5
        """
        score = 0
        # Index = CI span, value = bonus (0:5, 1-5:4, 6-10:3, 11-20:2, 21-30:1).
        level_score = [5,4,4,4,4,4,3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1]
        if dt['FILTER'] == 'PASS':
            score += 1
        if dt['QUAL'] == '999':
            score += 1
        # A numeric string longer than one character means the count >= 10.
        if len(dt['PE']) > 1:
            if len(dt['SR']) > 1:
                score += 4
            else:
                score += 1
        elif len(dt['SR']) > 1:
            score += 1
        # FIX: guard against the defaultdict's empty default before int().
        if dt['NUM_SVTOOLS'] and int(dt['NUM_SVTOOLS']) > 1:
            score += 4
        if dt['CIPOS']:
            cipos = sum(abs(int(i)) for i in dt['CIPOS'].split(','))
            if cipos < 31:
                score += level_score[cipos]
        if dt['CIEND']:
            ciend = sum(abs(int(i)) for i in dt['CIEND'].split(','))
            if ciend < 31:
                score += level_score[ciend]
        if dt['PE'] and dt['SR'] and dt['BND_DEPTH'] and \
            int(dt['BND_DEPTH']) > 20 and \
            (int(dt['PE']) + int(dt['SR']))*4 > int(dt['BND_DEPTH']) :
            score += 4
        # FIX: _conver_2_dict normalizes IMPRECISE to 'yes' (and _pass
        # compares 'yes'); the original compared 'PASS', so the -5 penalty
        # could never apply.
        if dt['IMPRECISE'] == 'yes':
            score -= 5
        return str(score)

    def _pass(self, dt) -> bool:
        """Return True when the record passes all quality filters.

        Rejects: confidence_score < 5; PE+SR <= 5; (likely) benign ACMG
        class; imprecise calls; QUAL < 10; in-house frequency > 0.1.
        """
        if int(dt['confidence_score']) < 5:
            return False
        if dt['PE'] and dt['SR'] and int(dt['PE']) + int(dt['SR']) <= 5:
            return False
        # ignore ACMG = B / LB ('良性的' also matches '可能良性的')
        if '良性的' in dt['ACMG_class']:
            return False
        if dt['IMPRECISE'] == 'yes':
            return False
        if dt['QUAL'] != '.' and float(dt['QUAL']) < 10:
            return False
        if dt['Inter_freq'] != '.' and float(dt['Inter_freq']) > 0.1:
            return False
        return True


if __name__ == '__main__':
    # Command-line entry point: annotate with AnnotSV, then optionally
    # filter the result when --filter is given.
    parser = argparse.ArgumentParser()
    parser.add_argument('--vcf', required=True)
    parser.add_argument('--sample', required=True)
    parser.add_argument('--hpo', default='')
    parser.add_argument('--run_dir', default='./')
    parser.add_argument('--overlap', default=80, type=int)
    parser.add_argument('--out_file', default='./anno.txt')
    parser.add_argument('--filter', default=False, action='store_true')
    parser.add_argument('--bcftools', default='/share/apps/bcftools/bin/bcftools')
    parser.add_argument('--AnnotSV_dir', default='/share/apps/AnnotSV/AnnotSV-master')
    parser.add_argument('--sv_db_file', default='/analysis_s140/reference/GkInnerDB/SV/V1/sv.db')
    opts = parser.parse_args()
    anno_sv = AnnotSV(
        opts.vcf, opts.hpo, opts.sample, opts.run_dir, opts.out_file,
        opts.bcftools, opts.AnnotSV_dir, opts.sv_db_file,
    )
    anno_sv.run(opts.overlap)
    if opts.filter:
        anno_sv.format_res()
