"""
2022.10.12 Liukb
    SV 的人群数据库管理（上传、删除、更新、查询）
"""

import re
import os
import sys
import attr
import time
import logging
import argparse
from glob import glob
from random import random
from SV_vcf_tools import VCF

logging.basicConfig(
    level = logging.INFO,
    format = "[%(asctime)s - %(levelname)-4s - %(module)s : line %(lineno)s] %(message)s",
    datefmt = "%Y-%m-%d %H:%M:%S"
)

class Rec:
    """ One SV database record: both breakends, the SV type, the carrier
    count and the carrier sample slot indexes.

    Can be built from either:
      * a tab-separated database line: Chr1 Pos1 Chr2 Pos2 Type Count Samples
      * a parsed VCF record exposing .CHROM, .POS, .INFO and .ALT
    """
    def __init__(self, vcf_rec):
        if isinstance(vcf_rec, str):
            # Database text line: split the 7 tab-separated columns.
            ks = vcf_rec.strip().split('\t')
            self.Chr1 = ks[0]
            self.Pos1 = ks[1]
            self.Chr2 = ks[2]
            self.Pos2 = ks[3]
            self.Type = ks[4]
            self.Count = int(ks[5])
            self.Samples = ks[6].split(',')
        else:
            self.Chr1 = vcf_rec.CHROM
            # Normalize positions to str: VCF parsers usually return POS/END
            # as int, and _same_pos compares them with == against the str
            # positions of text-parsed db records ('123' == 123 is False,
            # which silently prevented any merge).
            self.Pos1 = str(vcf_rec.POS)
            if 'END' in vcf_rec.INFO:
                self.Chr2 = vcf_rec.CHROM
                self.Pos2 = str(vcf_rec.INFO['END'])
            else:
                # Breakend (BND): mate coordinates live in ALT, e.g. N[chr2:321682[
                self.Chr2, self.Pos2 = re.findall(r'[\[\]](\w+)\:(\d+)', vcf_rec.ALT)[0]
            self.Type = vcf_rec.INFO['SVTYPE']
            self.Count = 1
            # No carriers recorded yet; upload() assigns the sample index.
            # (Previously unset, which could crash write() on ','.join.)
            self.Samples = []


@attr.s
class SV_vcf_base:
    """ Holds one sample's SV VCF and exposes its usable records.

    Attributes:
        vcf: VCF file location (as accepted by the VCF helper class).
        sample: sample identifier.
        min_sv: minimum SV length in bp; 0/unset falls back to 50.
    """
    vcf = attr.ib(type=str, default=attr.Factory(str))
    sample = attr.ib(type=str, default=attr.Factory(str))
    min_sv = attr.ib(type=int, default=attr.Factory(int))      # 默认最小 SV 为 50bp

    def __attrs_post_init__(self):
        # Fall back to the default minimum SV size of 50 bp.
        self.min_sv = self.min_sv or 50
        # Keep only usable SVs, already ordered by the VCF helper.
        self.records = VCF(self.vcf, self.min_sv).Iter()

    def Iter(self) -> Rec:
        """Yield every usable VCF record wrapped as a Rec."""
        for vcf_record in self.records:
            yield Rec(vcf_record)


@attr.s
class SV_DB:
    """ SV population database manager (upload / delete / update / search).
        The database file header is parsed automatically on construction.
    """
    db_file = attr.ib(type=str, default=attr.Factory(str))  # path to the database text file

    def __attrs_post_init__(self):
        # Merged records held in memory after upload()/delete(); flushed by write().
        self.all_records = []
        # True only after __db_lock() succeeds; guards write() and unlock in __del__.
        self.locked = False
        self.__header_parse()
        # Canonical chromosome ordering used when merging records:
        # 1-22, then the same with an upper-cased 'CHR' prefix, then X/Y variants.
        self.allow_chrom = [
            str(i) for i in range(1,23)
        ] + [
            f'CHR{i}' for i in range(1,23) 
        ] + ['X', 'Y', 'CHRX', 'CHRY']
        # Append-mode log file named '_log' in the database directory.
        self.db_log = open(os.path.join(os.path.dirname(self.db_file), '_log'), 'a')

    def ic(self, info: str, Type: str='INFO'):
        """Append a timestamped entry to the DB log file and mirror it to the console logger.

        Type selects the console log level (INFO/DEBUG/ERROR/WARN); unknown
        values are still written to the log file but not echoed.
        """
        stamp = time.strftime("%Y/%m/%d %H:%M:%S")
        self.db_log.write(f'[{stamp}] - [{Type}] - {info}\n')
        emit = {
            'INFO': logging.info,
            'DEBUG': logging.debug,
            'ERROR': logging.error,
            'WARN': logging.warning,
        }.get(Type)
        if emit is not None:
            emit(info)

    def __header_parse(self):
        """ Parse the database header and rebuild the sample bookkeeping.

        Group header lines look like:
            # sample_group_N=[all_num=X;sample_num=Y]<S1,S2,...>
        They accumulate into:
            self.all_num     -- total number of sample slots (freed ones included)
            self.sample_num  -- number of occupied slots
            self.sample_list -- sample ids in slot order; '' marks a freed slot
        Exits the process on a header/content mismatch.
        """
        self.all_num = 0
        self.sample_num = 0
        self.sample_list = []
        if os.path.isfile(self.db_file):
            with open(self.db_file) as db_in:
                for info in db_in:
                    # Only '# ' group lines carry bookkeeping; '#<...' and '#Chr1...' are skipped.
                    if info.startswith('# '):
                        all_num = int(re.findall(r'all_num=(\d+)', info)[0])
                        sample_num = int(re.findall(r'sample_num=(\d+)', info)[0])
                        sample_list = re.findall(r'<(.+)>', info)[0].split(',')
                        # Cross-check the declared counts against the actual list.
                        if all_num != len(sample_list):
                            self.ic(f'表头异常，标记记录数量: {all_num}, 实际的记录数量: {len(sample_list)}', 'ERROR')
                            sys.exit(1)
                        if sample_num != len([s for s in sample_list if s]):
                            # BUG FIX: the message previously printed all_num instead of sample_num.
                            self.ic(f'表头异常，标记样本数量: {sample_num}, 实际的样本数量: {len([s for s in sample_list if s])}', 'ERROR')
                            sys.exit(1)
                        self.all_num += all_num
                        self.sample_num += sample_num
                        self.sample_list.extend(sample_list)

    def _index(self, sample_id: str) -> int:
        """Choose the slot index for a sample: its existing slot if present,
        otherwise the first freed slot (''), otherwise a new slot at the end."""
        for candidate in (sample_id, ''):
            if candidate in self.sample_list:
                return self.sample_list.index(candidate)
        return len(self.sample_list)

    def _compare_records(self, db_rec: Rec, vcf_rec: Rec) -> Rec:
        """If the two records describe the same SV (full breakend + type match),
        merge the incoming sample into the db record and return it; else None."""
        if not self._same_pos(db_rec, vcf_rec, min=False):
            return None
        db_rec.Count += 1
        db_rec.Samples.append(str(self.in_sample_index))
        return db_rec

    @staticmethod
    def _same_pos(rec1: Rec, rec2: Rec, min: bool=True) -> bool:
        """Check whether two SVs share a position.

        min=True compares only Chr1/Pos1; min=False additionally requires the
        second breakend and the SV type to match.
        """
        if rec1.Chr1 != rec2.Chr1 or rec1.Pos1 != rec2.Pos1:
            return False
        if min:
            return True
        return rec1.Chr2 == rec2.Chr2 and rec1.Pos2 == rec2.Pos2 and rec1.Type == rec2.Type

    def Iter(self) -> Rec:
        """Yield database records: from memory when already loaded, otherwise
        parsed line by line from the db file (header lines skipped).
        Yields nothing when neither source exists."""
        if self.all_records:
            yield from self.all_records
        elif os.path.isfile(self.db_file):
            with open(self.db_file) as db_in:
                for line in db_in:
                    if not line.startswith('#'):
                        yield Rec(line)

    def __del__(self):
        """ Best-effort cleanup: release the DB lock file and close the log handle.

        Uses getattr guards because __del__ may run on a partially
        initialised instance (e.g. __attrs_post_init__ raised before
        db_log/locked were assigned), which previously raised AttributeError.
        """
        if getattr(self, 'locked', False):
            lock_file = getattr(self, 'lock_file', '')
            if lock_file and os.path.isfile(lock_file):
                os.remove(lock_file)
        db_log = getattr(self, 'db_log', None)
        if db_log is not None:
            db_log.close()

    def __db_lock(self, mark: str):
        """ Lock the database before modifying it, preventing interleaved writes.

        Writes `mark` into a hidden lock file next to the db file.  If another
        process holds the lock, sleeps in random <5s steps and retries; gives
        up (and exits) after 30 minutes of accumulated waiting.
        """
        self.lock_file = os.path.join(os.path.dirname(self.db_file), f".lock.{os.path.basename(self.db_file)}")
        all_sleep_time = 0
        while True:
            if not os.path.isfile(self.lock_file):
                # No lock yet: claim it, then loop to re-read and confirm it is ours.
                with open(self.lock_file, 'w') as fl:
                    fl.write(mark)
                continue
            # Double-check the lock really carries our mark (a racer may have won).
            with open(self.lock_file) as fi:
                if fi.read() == mark:
                    # Re-parse the header: the db may have changed while we waited.
                    self.__header_parse()
                    self.locked = True
                    return
            if all_sleep_time > 1800:
                # BUG FIX: this fatal condition was logged at the default INFO
                # level; every other fatal path uses 'ERROR' before exiting.
                self.ic(f'超过 30 分钟数据库还没有解锁 !!!', 'ERROR')
                sys.exit(1)
            if all_sleep_time == 0:
                self.ic(f'数据库正在被其他程序修改，等待 ... ')
            sleep_time = random() * 5
            all_sleep_time += sleep_time
            time.sleep(sleep_time)

    def upload(self, vcf: SV_vcf_base):
        """ Upload one sample's VCF records into the population database.

        Performs an ordered merge of the current db records with the incoming
        VCF records (both iterators are assumed to share the same sort order).
        SVs with identical breakends and type are merged by bumping Count and
        appending this sample's slot index; all others are kept in order.
        The merged list is stored in self.all_records for a later write().
        """
        # Take the database lock before any modification.
        if not self.locked:
            mark = f"{time.time()}-{vcf.sample}"
            self.__db_lock(mark)
        # Sample already present: remove its old data first (i.e. this is an update).
        if vcf.sample in self.sample_list:
            self.ic(f"{vcf.sample} 样本已存在，数据库更新该样本数据")
            self.delete(vcf)
        # Skip test / reanalysis batches and malformed sample ids.
        if '_test' in vcf.vcf:
            self.ic(f'vcf {vcf.vcf} 中包含 "_test" 字样，不上传人群数据库。')
            return
        if '_reanalysis' in vcf.vcf:
            self.ic(f'vcf {vcf.vcf} 中包含 "_reanalysis" 字样，不上传人群数据库。')
            return
        if len(vcf.sample) != 14:
            self.ic(f'样本编号 {vcf.sample} 不是 14 位，不上传人群数据库。')
            return
        self.ic(f'上传样本 {vcf.sample} 到人群数据库')
        all_records = []
        # Slot index recorded for this sample inside rec.Samples entries.
        self.in_sample_index = self._index(vcf.sample)
        db_generator = self.Iter()
        vcf_generator = vcf.Iter()
        db_rec: Rec = next(db_generator, None)
        vcf_rec: Rec = next(vcf_generator, None)
        while db_rec or vcf_rec:
            # Database exhausted: keep the remaining input records as-is.
            if not db_rec:
                vcf_rec.Samples = [f'{self.in_sample_index}']
                all_records.append(vcf_rec)
                vcf_rec = next(vcf_generator, None)
                continue
            # Input exhausted: keep the remaining database records as-is.
            elif not vcf_rec:
                all_records.append(db_rec)
                db_rec = next(db_generator, None)
                continue
            # Collect runs of records sharing the same Chr1/Pos1 on both sides,
            # so several SVs at one position can be cross-compared as a group.
            db_rec_list = []
            vcf_rec_list = []
            same_pos = False
            db_next = None
            vcf_next = None
            while self._same_pos(db_rec, vcf_rec):
                if not db_next:
                    db_next = next(db_generator, None)
                if db_next and self._same_pos(db_next, vcf_rec):
                    db_rec_list.append(db_rec)
                    db_rec = db_next
                    db_next = None
                    continue
                vcf_next = next(vcf_generator, None)
                if vcf_next and self._same_pos(vcf_next, vcf_rec):
                    vcf_rec_list.append(vcf_rec)
                    vcf_rec = vcf_next
                    continue
                same_pos = True
                break
            db_rec_list.append(db_rec)
            vcf_rec_list.append(vcf_rec)
            done_rec = []
            call_db = False     # whether the next db_rec must be fetched afterwards
            call_vcf = False     # whether the next vcf_rec must be fetched afterwards
            for db_rec in db_rec_list:
                for vcf_rec in vcf_rec_list:
                    res = self._compare_records(db_rec, vcf_rec)
                    if res:
                        all_records.append(res)
                        done_rec.extend([db_rec, vcf_rec])
                        if not db_next:
                            call_db = True
                        if not vcf_next:
                            call_vcf = True
                        break
                    # No merge: whichever record sorts first goes into all_records.
                    db_chr_index = self.allow_chrom.index(db_rec.Chr1)
                    vcf_chr_index = self.allow_chrom.index(vcf_rec.Chr1)
                    if db_chr_index < vcf_chr_index:
                        all_records.append(db_rec)
                        done_rec.append(db_rec)
                        call_db = True
                        continue
                    elif vcf_chr_index < db_chr_index:
                        vcf_rec.Samples = [f'{self.in_sample_index}']
                        all_records.append(vcf_rec)
                        call_vcf = True
                        done_rec.append(vcf_rec)
                        continue
                    else:
                        # Same chromosome: order by position.
                        if int(db_rec.Pos1) < int(vcf_rec.Pos1):
                            all_records.append(db_rec)
                            call_db = True
                            done_rec.append(db_rec)
                            continue
                        elif int(vcf_rec.Pos1) < int(db_rec.Pos1):
                            vcf_rec.Samples = [f'{self.in_sample_index}']
                            all_records.append(vcf_rec)
                            call_vcf = True
                            done_rec.append(vcf_rec)
                            continue
                        # Equal positions are flushed together below.
            for rec in db_rec_list + vcf_rec_list:
                # After a same-position group is fully compared, flush the
                # leftovers; otherwise keep them for the next loop round.
                if rec not in done_rec and same_pos:
                    if rec in vcf_rec_list:
                        rec.Samples = [f'{self.in_sample_index}']
                    all_records.append(rec)
                    call_db = True
                    call_vcf = True
            if db_next:
                db_rec = db_next
                db_next = None
            else:
                if call_db:
                    db_rec = next(db_generator, None)
                    call_db = False
            if vcf_next:
                vcf_rec = vcf_next
                vcf_next = None
            else:
                if call_vcf:
                    vcf_rec = next(vcf_generator, None)
                    call_vcf = False
        self.sample_num += 1
        if self.in_sample_index == len(self.sample_list):
            # Brand-new slot: grow both the slot count and the sample list.
            self.all_num += 1
            self.sample_list.append(vcf.sample)
        else:
            # A freed slot existed: reuse that index first.
            self.sample_list[self.in_sample_index] = vcf.sample
        self.all_records = all_records

    def delete(self, vcf: SV_vcf_base):
        """Remove one sample's data from the database.

        SVs carried only by this sample are dropped entirely; shared SVs have
        their Count decremented and the sample's slot index removed from
        Samples.  The slot in sample_list is freed ('') for later reuse.
        """
        if vcf.sample not in self.sample_list:
            self.ic(f'样本不存在数据库，不用删除 {vcf.sample}')
            return
        # Take the database lock before any modification.
        if not self.locked:
            self.__db_lock(f"{time.time()}-{vcf.sample}")
        self.ic(f'删除样本 {vcf.sample}')
        self.in_sample_index = self._index(vcf.sample)
        sample_tag = str(self.in_sample_index)
        kept_records = []
        for rec in self.Iter():
            if sample_tag in rec.Samples:
                if rec.Count == 1:
                    # Only this sample carried the SV: drop the record.
                    continue
                rec.Count -= 1
                rec.Samples = [s for s in rec.Samples if s != sample_tag]
            kept_records.append(rec)
        self.sample_num -= 1
        # Free the slot; a later upload may reuse this index.
        self.sample_list[self.in_sample_index] = ''
        self.all_records = kept_records

    def search(self, region: str, outfile: str= '', overlap: int=80):
        """Query SVs overlapping a 'chr:start-end' region (default 80% overlap).

        Prints a tab-separated result table (and writes it to `outfile` when
        given); logs a message when nothing matches.
        """
        if ':' not in region or '-' not in region:
            self.ic(f"查询区间格式为: chr:start-end, [{region}] 不合规", 'ERROR')
            return
        self.ic(f'查询区域 SV: {region}, 交集比率：{overlap} %')
        # Normalise the query: uppercase, then strip any 'chr' prefix.
        region = region.upper().replace('CHR', '')
        res = ''
        sv_num = 0
        for rec in self.Iter():
            if not self.call_overlap(rec, region, overlap):
                continue
            sv_num += 1
            freq = round(rec.Count / self.sample_num, 4)
            freq = f"{freq} ({rec.Count}/{self.sample_num})"
            samples = ",".join([self.sample_list[int(i)] for i in rec.Samples])
            # Truncate very long carrier lists for readability.
            if len(samples) > 150:
                samples = f"{samples[:75]} ... {samples[-75:]}"
            res = f"{res}\n{rec.Chr1}\t{rec.Pos1}\t{rec.Pos2}\t{rec.Type}\t{freq}\t{samples}"
        if not res:
            self.ic(f'无匹配结果')
            return
        self.ic(f'查询到 {sv_num} 个 SV 记录')
        res = f"Chr\tStart\tEnd\tType\tFreq\tSamples\n{res.strip()}"
        if outfile:
            with open(outfile, 'w') as fo:
                fo.write(res)
        print(res)

    def match_sv(self, Chr1, Pos1, Chr2, Pos2, Type) -> Rec:
        """Return the first database record matching all five SV fields, or None."""
        target = (Chr1, Pos1, Chr2, Pos2, Type)
        for rec in self.Iter():
            if (rec.Chr1, rec.Pos1, rec.Chr2, rec.Pos2, rec.Type) == target:
                return rec

    @staticmethod
    def call_overlap(rec: Rec, region: str, overlap: int) -> bool:
        """ Check whether a record overlaps the query region.

        `region` is 'chr:start-end', already normalised by search().
        BND records match when both breakpoints fall within 5 bp of the query
        bounds; every record type also matches when the overlap ratio computed
        by VCF.cal_overlap reaches `overlap` percent.

        Now returns a real bool on every path (the original fell through and
        implicitly returned None on a miss; truthiness is unchanged).
        """
        Chr1, Pos = region.split(':')
        # Only intra-chromosomal records can match a linear region.
        if Chr1 != rec.Chr1 or rec.Chr1 != rec.Chr2:
            return False
        Pos1, Pos2 = Pos.split('-')
        if rec.Type == 'BND':
            # Breakpoints within 5 bp are considered the same breakpoint.
            if abs(int(Pos1) - int(rec.Pos1)) <= 5 and abs(int(Pos2) - int(rec.Pos2)) <= 5:
                return True
        return VCF.cal_overlap(int(rec.Pos1), int(rec.Pos2), int(Pos1), int(Pos2)) >= overlap / 100

    def write(self):
        """ Persist the in-memory database to self.db_file.

        File layout:
            #<all_sample_rec=...;sample_num=...;records_num=...>
            # sample_group_N=[all_num=...;sample_num=...]<S1,S2,...>  (one per 100 slots)
            #Chr1  Pos1  Chr2  Pos2  Type  Count  Samples
            <tab-separated records>
        Record lines are buffered and flushed in ~100 KB chunks.
        """
        # Not locked means not modified; also skip a fully-empty database.
        if not self.locked or (len(self.all_records) == 0 and self.all_num == 0):
            self.ic(f'数据没有更新，不用再重新写入')
            return
        self.ic(f"保存数据库 <{self.db_file}>")
        with open(self.db_file, 'w') as db_out:
            if self.all_num:
                # Overall counts line.
                db_out.write(f"#<all_sample_rec={self.all_num};sample_num={self.sample_num};records_num={len(self.all_records)}>\n")
                # Full groups of 100 sample slots.
                full_groups = self.all_num // 100
                for part in range(full_groups):
                    sample_num = len([s for s in self.sample_list[part*100:part*100+100] if s])
                    samples = ",".join(self.sample_list[part*100:part*100+100])
                    db_out.write(f"# sample_group_{part+1}=[all_num=100;sample_num={sample_num}]<{samples}>\n")
                # Trailing partial group.  BUG FIX: the old numbering
                # (`part = part + 1 if part else part`) reused label
                # sample_group_1 when exactly one full group existed
                # (all_num 101-199); numbering from full_groups + 1 is correct
                # for every case.
                leftover = self.all_num % 100
                if leftover:
                    sample_num = len([s for s in self.sample_list[-leftover:] if s])
                    samples = ",".join(self.sample_list[-leftover:])
                    db_out.write(f"# sample_group_{full_groups+1}=[all_num={leftover};sample_num={sample_num}]<{samples}>\n")
                db_out.write(f"#Chr1\tPos1\tChr2\tPos2\tType\tCount\tSamples\n")
                all_rec = ''
                self.ic(f"共计 {self.sample_num} 例样本， {len(self.all_records)} 条记录")
                for rec in self.all_records:
                    # Flush the buffer once it grows past ~100 KB.
                    if len(all_rec) > 100000:
                        db_out.write(all_rec.strip() + '\n')
                        all_rec = ''
                    all_rec = f"{all_rec}\n{rec.Chr1}\t{rec.Pos1}\t{rec.Chr2}\t{rec.Pos2}\t{rec.Type}\t{rec.Count}\t{','.join(rec.Samples)}"
                db_out.write(all_rec.strip())


if __name__ == '__main__':
    # Command-line driver: upload / delete / search against the SV database.
    parse = argparse.ArgumentParser()
    parse.add_argument('--db_file', required=True)
    parse.add_argument('--vcf', default='')
    parse.add_argument('--sample', default='')
    parse.add_argument('--ana_dir', default='')
    parse.add_argument('--mode', default='upload', choices=['upload', 'delete', 'search'])
    parse.add_argument('--region', default='1:2337010-2337259')
    parse.add_argument('--outfile', default='./search.out')
    parse.add_argument('--overlap', default=80, type=int)
    # Custom Chinese usage text, printed instead of argparse's default help.
    usage = lambda *_:print(f"""
    使用: SV_db_tools.py [-h] --db_file DB_FILE [--vcf VCF] [--sample SAMPLE]
                        [--mode [upload,delete,search] [--region REGION]
                        [--outfile OUTFILE] [--overlap OVERLAP]
    
    功能:
    1. 上传 vcf 到数据库，带有自动更新功能（已存在数据库的样本会将其 SV 数据更新）:
        --mode upload --vcf vcf 文件地址 --sample 样本编号 --db_file 数据库文件地址

    2. 上传某批数据到数据库（已存在数据库的样本会将其 SV 数据更新）:
        --mode upload --ana_dir 分析批次的 SV 目录 --db_file 数据库文件地址

    3. 从数据库删除某个样本
        --mode delete --vcf vcf 文件地址 --sample 样本编号 --db_file 数据库文件地址
    
    4. 查询指定边界的 SV
        --mode search --region 1:2337010-2337259 --overlap 80 --db_file 数据库文件地址 [--outfile 查询结果保存文件]
    
    参数:
        --db_file DB_FILE                数据库文件地址
        --vcf VCF                        vcf 文件地址，多个以逗号分隔
        --sample SAMPLE                  样本编号，多个以逗号分隔
        --ana_dir ANA_DIR                批次的分析目录
        --mode 【upload,delete,search】  处理模式
        --region REGION                  查询区域
        --outfile OUTFILE                查询结果文件保存地址
        --overlap OVERLAP                变异一致性的重叠阈值

    """
    )
    # Route both -h/--help and usage errors to the custom text.
    parse.print_usage = usage
    parse.print_help = usage
    opts = parse.parse_args()
    db = SV_DB(opts.db_file)
    # print(opts.ana_dir)
    # Batch mode: collect every */Discovery/all.vcf under the analysis dir;
    # the sample id is the directory name three levels up from the vcf.
    if opts.ana_dir and os.path.isdir(opts.ana_dir):
        in_vcfs: list = glob(f"{opts.ana_dir}/*/Discovery/all.vcf")
        samples: list = [v.split('/')[-3] for v in in_vcfs]
    else:
        # Single-sample mode: comma-separated --vcf / --sample lists.
        in_vcfs = opts.vcf.split(',')
        samples = opts.sample.split(',')
    if opts.mode == 'upload':
        for in_vcf, sample in zip(in_vcfs, samples):
            # print(f"{sample}:{in_vcf}", sample)
            # Skip test / non-production batches.
            if '_test' in in_vcf:
                logging.info(f'vcf {in_vcf} 中包含 "_test" 字样，不上传人群数据库。')
                continue
            if '_reanalysis' in in_vcf:
                logging.info(f'vcf {in_vcf} 中包含 "_reanalysis" 字样，不上传人群数据库。')
                continue
            db.upload(SV_vcf_base(f"{sample}:{in_vcf}", sample))
        db.write()
    elif opts.mode == 'delete':
        for in_vcf, sample in zip(in_vcfs, samples):
            # Skip test / non-production batches.
            if '_test' in in_vcf:
                logging.info(f'vcf {in_vcf} 中包含 "_test" 字样，不上传人群数据库。')
                continue
            if '_reanalysis' in in_vcf:
                logging.info(f'vcf {in_vcf} 中包含 "_reanalysis" 字样，不上传人群数据库。')
                continue
            db.delete(SV_vcf_base(f"{sample}:{in_vcf}", sample))
        db.write()
    elif opts.mode == 'search':
        db.search(opts.region, opts.outfile, opts.overlap)
