import os, re, logging, sys, gzip, json

logger = logging.getLogger('NextApp-ONT-QC')

from nextapp.ont_qc.config import COL_VARTYPES, COLNAMES


def __check_head(summary_files):
    '''
    Scan the summary files for a header line and locate the required columns.

    Each file is scanned for a header line (one starting with "filename" or
    "read").  The first header found is used: every column name in COLNAMES
    must be present in it, otherwise an error is logged and the program
    exits.  If no file contains a header (or the file list is empty), an
    error is logged and the program exits as well.

    :param summary_files: list of paths to sequencing summary files
    :return: tuple (head_field, indexes) where head_field is the header
             line split on tabs and indexes are the positions of the
             COLNAMES columns within it
    '''
    for summary_file in summary_files:
        # `with` guarantees the handle is closed even on the error paths.
        with open(summary_file) as fp:
            for line in fp:
                line = line.strip()
                if not line:
                    continue
                if line.startswith('filename') or line.startswith('read'):
                    head_field = line.split('\t')
                    indexes = list()
                    for colname in COLNAMES:
                        try:
                            indexes.append(head_field.index(colname))
                        except ValueError:
                            # list.index raises ValueError when the column
                            # is missing; exit non-zero so callers can
                            # detect the failure.
                            logger.error('表头信息中没有"%s"字段: %s' % (colname, line))
                            sys.exit(1)
                    return (head_field, indexes)
        # No header in this file: keep looking in the remaining files
        # (previously the code bailed out after the first file, contrary
        # to its own documentation).
        logger.warning('没有找到Summary表头: %s' % summary_file)
    logger.error('没有找到Summary表头: %s' % summary_files)
    sys.exit(1)


def __check_colnum(field, head, line_info):
    '''
    Verify that a data row has the same number of columns as the header.

    A row with missing columns would be misaligned against the header
    fields, so it is rejected with a warning.

    :param field: the current row, already split into a list
    :param head: the header, already split into a list
    :param line_info: human-readable location of the current row
    :return: True when the column counts match, False otherwise
    '''
    if len(field) == len(head):
        return True
    logger.warning('行错误: 当前行列数与表头列数不匹配，%s' % line_info)
    return False


def __check_vartype(picks, line_info):
    '''
    Check that each picked value matches the type declared in COL_VARTYPES.

    A row with missing columns can still happen to have the right column
    count; a type mismatch then reveals the misalignment, so this check
    complements __check_colnum.

    :param picks: the values of the COLNAMES columns for the current row
    :param line_info: human-readable location of the current row
    :return: True when every value matches its declared type
    '''
    # Compile once per call instead of once per column (the patterns were
    # previously rebuilt inside the loop).  `-?` replaces the old `-*`,
    # which wrongly accepted values such as "--5".
    int_pattern = re.compile(r'^-?\d+$')
    float_pattern = re.compile(r'^-?\d+\.\d+$')
    for i, value in enumerate(picks):
        # issubclass is equivalent to the old isinstance(COL_VARTYPES[i](), T)
        # without instantiating a throwaway object per column per row.
        vartype = COL_VARTYPES[i]
        if issubclass(vartype, int):
            if not int_pattern.match(value):
                logger.warning('行错误: 变量类型不匹配, int:%s, %s' % (value, line_info))
                return False
        if issubclass(vartype, float):
            if not float_pattern.match(value):
                logger.warning('行错误: 变量类型不匹配, float:%s, %s' % (value, line_info))
                return False
    return True


def __filter(summary_files, head, indexes, out_summary_file):
    '''
    Merge the summary files into one gzip-compressed file, keeping only
    well-formed sequencing reads.

    A row is counted as an error when its column count or a picked column's
    type does not match the header; mux-scan rows are counted separately
    and dropped; only "_sequencing_run_" rows are written out.  Rows that
    match neither marker are silently skipped, which is why the totals may
    disagree — that case is logged as a warning.

    :param summary_files: list of input summary file paths
    :param head: header fields (from __check_head)
    :param indexes: positions of the COLNAMES columns in the header
    :param out_summary_file: path of the gzipped output file
    :return: dict with 'total', 'sequence', 'mux_scan' and 'error' counts
    '''
    total, sequence, mux_scan, error = 0, 0, 0, 0
    # `with` guarantees the handles are closed even if a row raises;
    # previously an exception mid-loop leaked both file objects.
    with gzip.open(out_summary_file, 'wb') as fp_out:
        fp_out.write(('%s\n' % '\t'.join(head)).encode())
        for summary_file in summary_files:
            with open(summary_file) as fp:
                for line_count, line in enumerate(fp, start=1):
                    line = line.strip()
                    line_info = '位于%s文件第%d行' % (summary_file, line_count)
                    # Skip blank lines and (repeated) header lines.
                    if not line or line.startswith('filename') or line.startswith('read'):
                        continue
                    total += 1
                    field = line.split('\t')
                    if not __check_colnum(field, head, line_info):
                        error += 1
                        continue
                    picks = [field[i] for i in indexes]
                    if not __check_vartype(picks, line_info):
                        error += 1
                        continue
                    if '_mux_scan_' in line:
                        mux_scan += 1
                        continue
                    if '_sequencing_run_' in line:
                        sequence += 1
                        fp_out.write(('%s\n' % line).encode())
    if total != sequence + mux_scan + error:
        logger.warning('total reads: %d != sequence reads: %d + mux_scan reads: %d + error: %d' % (total, sequence, mux_scan, error))
    return {
        'total': total,
        'sequence': sequence,
        'mux_scan': mux_scan,
        'error': error,
    }


def run_filter_summary(cell_dir, out_summary_file):
    '''
    Collect all sequencing summary files under *cell_dir* and filter them
    into a single gzip-compressed summary.

    :param cell_dir: directory searched recursively for files named like
                     "sequencing_summary_<id>.txt"
    :param out_summary_file: path of the gzipped output file
    :return: the statistics dict produced by __filter
    '''
    # Compile once instead of on every file name inside the loop.
    # NOTE(review): re.match is not end-anchored, so names such as
    # "sequencing_summary_x.txt.gz" also match — confirm whether
    # fullmatch is intended.
    pattern = re.compile(r'sequencing_summary_\w+\.txt')
    summary_files = list()
    for root, _dirs, filenames in os.walk(cell_dir):
        for filename in filenames:
            if not pattern.match(filename):
                continue
            summary_files.append(os.path.join(root, filename))
    # os.walk order is filesystem dependent; sort so the merged output
    # (and which file supplies the header) is reproducible.
    summary_files.sort()
    head, indexes = __check_head(summary_files)
    return __filter(summary_files, head, indexes, out_summary_file)
