import os, json
from .summary import read_summary4stat, Info


def __write_stat_json(outfile, filter_stat, info_all, info_hq, info_lq, distribution_step, distribution_endpoint):
    """Return the QC statistics dict, using *outfile* as a JSON cache.

    If *outfile* already exists it is treated as a cached result: it is
    parsed and returned as-is and every other argument is ignored.
    Otherwise the dict is built from the filter counters and the three
    Info objects, written to *outfile* as indented JSON, and returned.

    :param outfile: path of the JSON cache file to read or create.
    :param filter_stat: dict of summary/fastq filtering counters.
    :param info_all: Info object over all reads (exposes .stat, .nx0,
        .interval(), .cumulative()).
    :param info_hq: Info object over high-quality reads.
    :param info_lq: Info object over low-quality reads.
    :param distribution_step: bin width passed to interval()/cumulative().
    :param distribution_endpoint: last bin edge passed to
        interval()/cumulative().
    :return: the statistics dict (loaded or freshly built).
    """
    if os.path.exists(outfile):
        # Cached run: reuse the previously computed statistics.
        with open(outfile) as fp:
            return json.load(fp)

    data = {
        'filter': filter_stat,
        'qc': {
            'all': info_all.stat,
            'hq': info_hq.stat,
            'lq': info_lq.stat
        },
        'hq_stat': {
            'nx0': info_hq.nx0,
            'distribution': {
                'interval': info_hq.interval(distribution_step, distribution_endpoint),
                'cumulative': info_hq.cumulative(distribution_step, distribution_endpoint)
            }
        },
        'all_stat': {
            'nx0': info_all.nx0,
            'distribution': {
                'interval': info_all.interval(distribution_step, distribution_endpoint),
                'cumulative': info_all.cumulative(distribution_step, distribution_endpoint)
            }
        }
    }
    with open(outfile, 'w') as fp:
        json.dump(data, fp, indent=4)
    return data


def __write_stat_tsv(outfile, stat, distribution_step):
    """Render the statistics dict (built by __write_stat_json) as a
    human-readable, '='-separated TSV report at *outfile*.

    :param outfile: path of the TSV report to (over)write.
    :param stat: dict with 'filter', 'qc', 'hq_stat' and 'all_stat' keys.
    :param distribution_step: unused; kept for interface compatibility.
    """
    separator = '\n%s\n\n' % ('=' * 100)
    with open(outfile, 'w') as fp:
        # -- summary-file filtering counters --
        fp.write('Statistics: read the summary file and filter the error format lines\n')
        fp.write('Total_reads\tSequence_reads\tMux_Scan_reads\tError_reads\n')
        fp.write('{:^11}\t{:^14}\t{:^14}\t{:^11}\n'.format(
            stat['filter']['summary']['total'], stat['filter']['summary']['sequence'],
            stat['filter']['summary']['mux_scan'], stat['filter']['summary']['error']
        ))
        fp.write(separator)

        # -- fastq filtering counters --
        fp.write('Statistics: read the fastq file and filter the error format reads\n')
        fp.write('Raw_reads\tClean_reads\terror_reads\tFixed_reads\n')
        fp.write('{:^9}\t{:^11}\t{:^11}\t{:^11}\n'.format(
            stat['filter']['fastq']['raw'], stat['filter']['fastq']['clean'],
            stat['filter']['fastq']['error'], stat['filter']['fastq']['fixed']
        ))
        fp.write(separator)

        # -- per-quality-class QC table (All / HQ / LQ share one layout) --
        fp.write('Statistics: read and stat the cleaned data\n')
        fp.write('Quality\tRead_number\tBase_number\tMax_length\tMin_length\tMean_length\tMedium_Length\tMean_qscore\n')
        for label, key in (('All', 'all'), ('HQ', 'hq'), ('LQ', 'lq')):
            qc = stat['qc'][key]
            fp.write('{:^7}\t{:^11}\t{:^11}\t{:^10}\t{:^10}\t{:^11}\t{:^13}\t{:^10}\n'.format(
                label, qc['read_num'], qc['base_num'],
                qc['max_len'], qc['min_len'],
                '%.2f' % qc['mean_len'], qc['medium_len'],
                '%.2f' % qc['mean_qscore']
            ))

        # -- Nx0 + length-distribution sections (HQ then All share one layout) --
        for title, section in (('HQ', 'hq_stat'), ('All', 'all_stat')):
            nx0 = stat[section]['nx0']
            # NOTE: the separator before the 'All Nx0' section was missing
            # in the original output; added for consistency with every
            # other section.
            fp.write(separator)
            fp.write('Statistics: %s Nx0\n' % title)
            fp.write('Level\tLength\tBase_number\tBase_percent\tRead_number\tRead_percent\n')
            for level in ('n%d0' % i for i in range(1, 10)):
                # Skip levels that were not computed. This guard existed
                # only for the HQ section in the original code; without it
                # the All section could raise KeyError.
                if nx0['base'].get(level) is None:
                    continue
                fp.write('{:^5}\t{:^6}\t{:^11}\t{:^12}\t{:^11}\t{:^12}\n'.format(
                    level.upper(), nx0['length'][level],
                    nx0['base'][level][0], '%.4f' % nx0['base'][level][1],
                    nx0['read'][level][0], '%.4f' % nx0['read'][level][1]
                ))

            distribution = stat[section]['distribution']
            fp.write(separator)
            fp.write('Statistics: %s read interval length distribution \n' % title)
            fp.write('Length_interval\tRead_number\tRead_percent\n')
            for info in distribution['interval']:
                fp.write('{:^15}\t{:^11}\t{:^12}\n'.format(
                    info['name'], info['reads_count'], '%.4f' % info['rc_rate']))

            # BUG FIX: the original wrote this section title *before* its
            # '=' separator (transposed lines); every other section puts
            # the separator first.
            fp.write(separator)
            fp.write('Statistics: %s read cumulative length distribution \n' % title)
            fp.write('Length_cumulative\tRead_number\tRead_percent\n')
            for info in distribution['cumulative']:
                fp.write('{:^17}\t{:^11}\t{:^12}\n'.format(
                    info['name'], info['reads_count'], '%.4f' % info['rc_rate']))


def run_stat(outdir, summary4stat_tsv, filter_stat, qscore_cutoff, distribution_step, distribution_endpoint):
    """Compute QC statistics from the summary TSV and write both the
    JSON ('qc_stat.json') and TSV ('qc_stat.tsv') reports into *outdir*.
    """
    json_path = os.path.join(outdir, 'qc_stat.json')
    tsv_path = os.path.join(outdir, 'qc_stat.tsv')
    all_info, hq_info, lq_info = read_summary4stat(summary4stat_tsv, qscore_cutoff)
    data = __write_stat_json(
        json_path, filter_stat, all_info, hq_info, lq_info,
        distribution_step, distribution_endpoint
    )
    __write_stat_tsv(tsv_path, data, distribution_step)