import gzip, os, logging, sys, json, hashlib
from .summary import run_filter_summary
from .fastq import run_filter_fastq
from nextapp.ont_qc.config import COLNAMES

logger = logging.getLogger('NextApp-ONT-QC')


def md5sum(fname):
    """Return the hex MD5 digest of the file at *fname*.

    The file is read in fixed-size binary chunks so arbitrarily large
    files can be hashed without loading them fully into memory.

    :param fname: path of the file to hash
    :return: 32-character lowercase hex digest string
    """
    md5_value = hashlib.md5()
    with open(fname, 'rb') as fp:
        # 8 KiB chunks (the original 8096 looked like a typo for 8192);
        # iter() with a b'' sentinel stops cleanly at EOF.
        for chunk in iter(lambda: fp.read(8192), b''):
            md5_value.update(chunk)
    # NOTE: the 'with' block closes fp; the original's extra fp.close()
    # after the block was redundant and has been removed.
    return md5_value.hexdigest()


def get_md5_json(report_dir):
    """Write ``md5.json`` into *report_dir*, mapping each file name to its MD5.

    :param report_dir: directory whose regular files are hashed; the
        resulting ``md5.json`` is written into the same directory.
    """
    md5_dict = {}
    for cell_file in os.listdir(report_dir):
        path = os.path.join(report_dir, cell_file)
        # Skip anything that is not a regular file: md5sum() opening a
        # subdirectory would raise IsADirectoryError.
        if not os.path.isfile(path):
            continue
        md5_dict[cell_file] = md5sum(path)
    with open(os.path.join(report_dir, 'md5.json'), 'w') as fp:
        fp.write(json.dumps(md5_dict, indent=4))


def __summary4stat(summary_file, fastq, out_summary4stat_file):
    """Join per-read stats from the sequencing summary onto the fastq reads.

    Builds a lookup of ``read_id -> {read_id, seq_len, qscore}`` from the
    gzipped sequencing summary, then walks the gzipped fastq and writes one
    TSV row per read to *out_summary4stat_file* (gzipped, with a header of
    the first three COLNAMES). Reads present in the fastq but missing from
    the summary fall back to ``len(sequence)`` and a qscore of ``-1.0``.

    Exits the process with status 1 on a malformed fastq record.
    """
    reads_dict = {}
    col_idx = None  # (read_id, seq_len, qscore) column positions, set at the header row
    with gzip.open(summary_file, 'rb') as fp:
        for raw in fp:
            line = raw.decode().strip()
            if not line:
                continue
            field = line.split('\t')
            # Header row starts with 'filename' or 'read'; resolve the three
            # column positions once instead of calling list.index() per line.
            if line.startswith('filename') or line.startswith('read'):
                col_idx = tuple(field.index(col) for col in COLNAMES[0:3])
                continue
            read_id = field[col_idx[0]]
            seq_len = field[col_idx[1]]
            qscore = field[col_idx[2]]
            reads_dict[read_id] = {'read_id': read_id, 'seq_len': int(seq_len), 'qscore': float(qscore)}
    # Context managers guarantee both handles are closed even if we bail
    # out via sys.exit() on a malformed record.
    with gzip.open(fastq, 'rb') as fp, gzip.open(out_summary4stat_file, 'wb') as fp_out:
        fp_out.write(('%s\n' % '\t'.join(COLNAMES[0:3])).encode())
        while True:
            title = fp.readline().decode().strip()
            sequence = fp.readline().decode().strip()
            fp.readline()  # '+' separator line (ignored)
            fp.readline()  # quality line (ignored)
            if not title and not sequence:
                break  # clean EOF: both lines exhausted together
            if not title or not sequence:
                # Truncated record: only one of title/sequence present.
                logger.error('fastq 错误: title 或 sequence不存在')
                logger.error(title if title else sequence)
                sys.exit(1)  # non-zero so callers see the failure
            if not title.startswith('@') or title.find('runid=') == -1:
                logger.error('fastq 错误: title不符合格式')
                logger.error(title)
                sys.exit(1)
            # read_id is the first whitespace-delimited token, minus the '@'.
            read_id = title.split(' ')[0][1:]
            info = reads_dict.get(read_id, {'read_id': read_id, 'seq_len': len(sequence), 'qscore': -1.0})
            fp_out.write(('%s\t%d\t%f\n' % (info.get('read_id'), info.get('seq_len'), info.get('qscore'))).encode())


def run_filter(cell_dir, outdir, prefix, thread):
    """Run summary + fastq filtering for one cell directory, with resume.

    If the stat JSON already exists the filtering step is skipped and the
    cached stats are reloaded; likewise the summary4stat TSV is only
    regenerated when missing.

    :param cell_dir: input ONT cell directory
    :param outdir: output directory for all produced files
    :param prefix: file-name prefix for the outputs
    :param thread: thread count forwarded to run_filter_fastq
    :return: (summary_file, fastq, summary4stat_file, filter_stat dict)
    """
    out_fastq = os.path.join(outdir, '%s.fastq.gz' % prefix)
    out_summary_file = os.path.join(outdir, '%s.sequencing_summary.txt.gz' % prefix)
    out_stat_json_file = os.path.join(outdir, 'tmp.filter.stat.json')
    out_summary4stat_file = os.path.join(outdir, '%s.summary4stat.tsv.gz' % prefix)
    if os.path.exists(out_stat_json_file):
        # Resume: reuse the cached stats instead of filtering again.
        with open(out_stat_json_file) as fp:
            filter_stat = json.load(fp)
        logger.info('统计文件%s已存在，跳过过滤' % out_stat_json_file)
    else:
        filter_summary_stat = run_filter_summary(cell_dir, out_summary_file)
        filter_fastq_stat = run_filter_fastq(cell_dir, out_fastq, thread)
        filter_stat = {'summary': filter_summary_stat, 'fastq': filter_fastq_stat}
        with open(out_stat_json_file, 'w') as fp:
            fp.write(json.dumps(filter_stat, indent=4))
    if not os.path.exists(out_summary4stat_file):
        __summary4stat(out_summary_file, out_fastq, out_summary4stat_file)
    return out_summary_file, out_fastq, out_summary4stat_file, filter_stat
