#! /usr/bin/python
import argparse
import os
import random
from collections import namedtuple

import numpy as np
from sklearn.metrics import roc_auc_score
import gzip

# Sample sizes (per class) evaluated by the __main__ driver below.
num_sites = [100000, ]

# One aggregated methylation call. holeid/loc/depth are placeholders here
# ("-", -1, -1) since ONT records carry no hole/subread information.
CallRecord = namedtuple('CallRecord', ['chrom', 'pos', 'strand',
                                       'holeid', 'loc', 'depth',
                                       'prob0', 'prob1',
                                       'predicted_label',
                                       'is_true_methylated'])


def sample_sites_ont(filenames, is_methylated, probcf):
    """Load per-read CpG call files and aggregate them into CallRecords.

    Each input line is tab-separated with at least 8 columns:
    chrom, pos, strand, <unused>, read_id, <unused>, prob0, prob1.
    Calls sharing the same (chrom, pos, strand, read_id) key are averaged;
    averaged calls with |prob0 - prob1| < probcf are dropped as ambiguous.

    Args:
        filenames: list of file paths; ".gz" files are read transparently.
        is_methylated: ground-truth label stamped on every returned record.
        probcf: ambiguity cutoff in [0, 1]; 0 keeps all calls.

    Returns:
        list of CallRecord with predicted_label = 1 iff mean prob1 > mean prob0.
    """
    # key -> [sum of prob0, sum of prob1, number of calls]
    group_dict = {}
    for filename in filenames:
        if filename.endswith(".gz"):
            infile = gzip.open(filename, 'rt')
        else:
            infile = open(filename)
        # Context manager guarantees the handle is closed even if a line
        # is malformed and float() raises.
        with infile:
            for line in infile:
                words = line.strip().split("\t")
                prob0, prob1 = float(words[6]), float(words[7])
                # Group by chrom/pos/strand plus read id (column 5).
                key = tuple(words[:3] + [words[4]])
                if key not in group_dict:
                    group_dict[key] = [prob0, prob1, 1]
                else:
                    group_dict[key][0] += prob0
                    group_dict[key][1] += prob1
                    group_dict[key][2] += 1

    result = []
    skip_cnt = 0
    for key, (sum0, sum1, n) in group_dict.items():
        prob0 = sum0 / n
        prob1 = sum1 / n
        # Discard ambiguous calls whose averaged probabilities are too close.
        if abs(prob0 - prob1) < probcf:
            skip_cnt += 1
            continue
        label = 1 if prob1 > prob0 else 0
        result.append(CallRecord(key[0], int(key[1]), key[2],
                                 "-", -1, -1,
                                 prob0, prob1,
                                 label,
                                 is_methylated))

    print('there are {} cpg candidates totally, {} cpgs kept, {} cpgs left'.format(len(group_dict),
                                                                                   len(result),
                                                                                   skip_cnt))

    return result




# Command-line driver: evaluate per-read methylation calls against known
# methylated/unmethylated truth sets and append mean/std metric rows to a TSV.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Calculate call accuracy stats of ccsmeth for cpgs')
    # Input call files; each flag may be repeated to supply multiple files.
    parser.add_argument('--unmethylated', type=str, action="append",required=True)
    parser.add_argument('--methylated', type=str, action="append",required=True)
    # NOTE(review): depth_cf values are only echoed into the output rows below;
    # no depth-based filtering happens anywhere in this script — confirm intent.
    parser.add_argument('--depth_cf', action="append", required=True)
    parser.add_argument('--prob_cf', action="append", required=True,
                        help='this is to remove ambiguous calls. '
                             'if abs(prob1-prob0)>=prob_cf, then we use the call. e.g., proc_cf=0 '
                             'means use all calls. range [0, 1].')
    parser.add_argument('--round', type=int, default=5, required=False,
                        help="number of repeated tests for random sampling")
    # NOTE(review): declared optional but used unconditionally below —
    # omitting --result_file makes os.path.abspath(None) raise TypeError.
    parser.add_argument('--result_file', type=str, required=False,
                        help='the result file are going to save')
    parser.add_argument('--seed', type=int, default=1234, help="seed")
    
    args = parser.parse_args()

    random.seed(args.seed)

    depth_cfs = args.depth_cf
    prob_cfs = args.prob_cf

    result_file = os.path.abspath(args.result_file)
    pr_writer = open(result_file, 'w')
    # Header: confusion-matrix counts, derived rates, AUC and run metadata.
    pr_writer.write("tested_type\tTP\tFN\tTN\tFP\t"
                    "accuracy\trecall\tspecificity\tprecision\t"
                    "fallout\tmiss_rate\tFDR\tNPV\tAUC\tsamplenum\tdepth_cutoff\tprob_cf\tnum_rounds\n")

    # One evaluation per (depth_cf, prob_cf) combination.
    for depth_cf in depth_cfs:
        for prob_cf in prob_cfs:
            # Aggregate per-read calls and drop ambiguous ones (see sample_sites_ont).
            unmethylated_sites = sample_sites_ont(args.unmethylated, False, float(prob_cf))
            methylated_sites = sample_sites_ont(args.methylated, True, float(prob_cf))

            for site_num in num_sites:
                num_rounds = args.round
                # When both pools are no larger than the sample size, sampling
                # always takes the whole pool, so a single round suffices.
                if site_num >= len(methylated_sites) and site_num >= len(unmethylated_sites):
                    num_rounds = 1
                metrics = []
                for roundidx in range(num_rounds):
                    # Random subsample of up to site_num records from each class.
                    random.shuffle(methylated_sites)
                    random.shuffle(unmethylated_sites)
                    tested_sites = methylated_sites[:site_num] + unmethylated_sites[:site_num]

                    tp = 0
                    fp = 0
                    tn = 0
                    fn = 0

                    # NOTE(review): called/correct are never updated or read — dead locals.
                    called = 0
                    correct = 0

                    y_truelabel = []
                    y_scores = []

                    # Tally the confusion matrix; Python bools sum as 0/1.
                    for s in tested_sites:
                        tp += s.predicted_label and s.is_true_methylated
                        fp += s.predicted_label and not s.is_true_methylated
                        tn += not s.predicted_label and not s.is_true_methylated
                        fn += not s.predicted_label and s.is_true_methylated

                        y_truelabel.append(s.is_true_methylated)
                        y_scores.append(s.prob1)

                    # Debug trace of the raw confusion counts for this round.
                    print(tp, fn, tn, fp)
                    precision, recall, specificity, accuracy = 0, 0, 0, 0
                    fall_out, miss_rate, fdr, npv, = 0, 0, 0, 0
                    auroc = 0
                    if len(tested_sites) > 0:
                        accuracy = float(tp + tn) / len(tested_sites)
                        if tp + fp > 0:
                            precision = float(tp) / (tp + fp)
                            fdr = float(fp) / (tp + fp)  # false discovery rate
                        else:
                            precision = 0
                            fdr = 0
                        if tp + fn > 0:
                            recall = float(tp) / (tp + fn)
                            miss_rate = float(fn) / (tp + fn)  # false negative rate
                        else:
                            recall = 0
                            miss_rate = 0
                        if tn + fp > 0:
                            specificity = float(tn) / (tn + fp)
                            fall_out = float(fp) / (fp + tn)  # false positive rate
                        else:
                            specificity = 0
                            fall_out = 0
                        if tn + fn > 0:
                            npv = float(tn) / (tn + fn)  # negative predictive value
                        else:
                            npv = 0
                        # NOTE(review): roc_auc_score raises ValueError when only one
                        # true class is present in tested_sites — confirm both input
                        # sets are always non-empty.
                        auroc = roc_auc_score(np.array(y_truelabel), np.array(y_scores))
                    metrics.append([tp, fn, tn, fp, accuracy, recall, specificity, precision,
                                    fall_out, miss_rate, fdr, npv, auroc, len(tested_sites)])
                print("")
                # cal mean
                metrics = np.array(metrics, dtype=float)

                # Row 1: mean over rounds (counts to 1 dp, rates to 4 dp).
                metrics_mean = np.mean(metrics, 0)
                mean_tpfntnfp = "\t".join([str(round(x, 1)) for x in metrics_mean[:4]])
                mean_perf = "\t".join([str(round(x, 4)) for x in metrics_mean[4:13]])
                mean_numlen = str(round(metrics_mean[13]))
                pr_writer.write("\t".join([str(site_num), mean_tpfntnfp, mean_perf, mean_numlen,
                                           str(depth_cf), str(prob_cf), str(num_rounds)]) + "\n")

                # Row 2: standard deviation over rounds, tagged "<site_num>_std".
                metrics_std = np.std(metrics, 0)
                std_tpfntnfp = "\t".join([str(round(x, 1)) for x in metrics_std[:4]])
                std_perf = "\t".join([str(round(x, 4)) for x in metrics_std[4:13]])
                std_numlen = str(round(metrics_std[13]))
                pr_writer.write("\t".join([str(site_num) + "_std", std_tpfntnfp, std_perf, std_numlen,
                                           str(depth_cf), str(prob_cf), str(num_rounds)]) + "\n")
                pr_writer.flush()

    pr_writer.close()
