# -*- coding:utf-8 -*-
import collections
from utils.metrics import *

LENGTH_SIZE = 6  # number of query-length buckets produced by get_length_index (5 thresholds + overflow)


def get_length_index(length, thresholds=(5, 10, 15, 20, 25)):
    """Map a query length to a length-bucket index.

    Bucket ``i`` covers lengths in ``(thresholds[i-1], thresholds[i]]``;
    anything above the last threshold falls into the final overflow
    bucket.  The default thresholds reproduce the original hard-coded
    5/10/15/20/25 split, yielding ``LENGTH_SIZE == 6`` buckets.

    Args:
        length: the (integer) length to bucket.
        thresholds: ascending inclusive upper bounds of the buckets.

    Returns:
        Bucket index in ``range(len(thresholds) + 1)``.
    """
    for idx, bound in enumerate(thresholds):
        if length <= bound:
            return idx
    # Longer than every threshold: overflow bucket.
    return len(thresholds)


def load_result_data(path):
    result = collections.defaultdict(list)
    with open(path, 'r', encoding='utf-8') as fr:
        for line in fr:
            ss = line.rstrip().split("\t")
            label, query, candidate, score1, score2, score3 = \
                int(ss[0]), ss[1], ss[2], float(ss[3]), float(ss[4]), float(ss[5])
            result[ss[1]].append((label, query, candidate, score1, score2, score3))
    return result


def analyze_length():
    """Report query-length statistics and per-length-bucket rank metrics.

    Loads the STC training result file, prints length distribution
    statistics (mean, min/max, 20%-step percentiles), then for each of
    the three score columns evaluates ranking metrics per length bucket
    (see ``get_length_index``) and prints the per-bucket averages.
    """
    samples = load_result_data("../data/stc/stc.train.res.txt")

    queries = samples.keys()
    size = len(queries)
    print('query number: {}'.format(len(queries)))
    query_lengths = [len(q) for q in queries]
    print('mean lengths: {}'.format(sum(query_lengths) / len(query_lengths)))

    min_length, max_length = min(query_lengths), max(query_lengths)
    print("max length: {}".format(max_length))
    print("min length: {}".format(min_length))

    # Length percentiles at 20% steps (the last one is the maximum).
    sorted_lengths = sorted(query_lengths)
    for frac in (0.2, 0.4, 0.6, 0.8, 1.0):
        print(sorted_lengths[int(frac * size) - 1])

    print('=' * 100)
    model_eval_result = collections.defaultdict(list)

    # One pass per score column (models 1..3).
    for k in range(3):
        eval_length_result = dict()
        for eval_name in EVAL_NAMES:
            eval_length_result[eval_name] = [[] for _ in range(LENGTH_SIZE)]

        for query in queries:
            g_samples = samples[query]
            idx = get_length_index(len(query))
            labels = [e[0] for e in g_samples]
            preds = [e[3 + k] for e in g_samples]

            # Each query forms a single ranking group.
            res = run_rank_evaluate(labels, preds, groups=[len(g_samples)])
            for key in eval_length_result:
                eval_length_result[key][idx].append(res[key])

        for key in eval_length_result:
            result = eval_length_result[key]
            # Guard against empty buckets: report 0.0 instead of raising
            # ZeroDivisionError when no query falls into a length range.
            scores = [sum(s) / len(s) if s else 0.0 for s in result]

            model_eval_result[key].append(scores)

            score_str = "\t".join(map(str, scores))
            print("{}\t{}".format(key, score_str))

        print('\n')


def analyze_statistics():
    """Print comment-count statistics for the STC repository file.

    Groups responses by post, then reports: number of posts, mean
    comments per post, the largest comment count, and how many posts
    exceed 100 / 200 comments.
    """
    by_post = collections.defaultdict(list)
    with open(r"C:\Project\text-match-stc\data\stc\stc2-repos-all.txt", 'r', encoding="utf-8") as fr:
        for raw in fr:
            parts = raw.rstrip().split('\t')
            assert len(parts) == 2
            by_post[parts[0]].append(parts[1])

    print(len(by_post))
    counts = [len(v) for v in by_post.values()]
    print(sum(counts) / len(counts))
    print(max(counts))

    def _count_above(nums, threshold):
        # Number of posts with strictly more than `threshold` comments.
        return sum(1 for c in nums if c > threshold)

    print(_count_above(counts, 100))
    print(_count_above(counts, 200))


def analyze_repos_lengths():
    """Print length statistics over the set of unique responses.

    Reports: number of unique responses, mean character length, the
    80th/90th percentile lengths, and the count/ratio of responses
    shorter than 30 and 20 characters.
    """
    unique_responses = set()
    with open(r"C:\Project\text-match-stc\data\stc\stc2-repos-all.txt", 'r', encoding="utf-8") as fr:
        for raw in fr:
            parts = raw.rstrip().split('\t')
            assert len(parts) == 2
            unique_responses.add(parts[1])

    print(len(unique_responses))
    lengths = sorted(len(r) for r in unique_responses)
    print('mean length', sum(lengths) / len(lengths))

    # 80th and 90th percentile response lengths.
    total = len(lengths)
    print(lengths[int(0.8 * total) - 1])
    print(lengths[int(0.9 * total) - 1])

    def _report_less_than(nums, k):
        # Count (and ratio) of responses strictly shorter than k chars.
        below = sum(1 for length in nums if length < k)
        print(k, below, below / len(nums))

    _report_less_than(lengths, k=30)
    _report_less_than(lengths, k=20)


def study_cases():
    """Print the queries where the first model most outperforms the others.

    For every query, each of the three score columns is evaluated with
    nDCG@1.  Queries are ranked by how much model 1 beats models 2 and 3
    (ties broken in favour of model 2 over model 3), and the top-3
    candidates of each model are dumped for the ten best cases.
    """
    samples = load_result_data("../data/stc/stc.train.res.txt")

    eval_result = collections.defaultdict(dict)
    for model_idx in range(3):
        for query, g_samples in samples.items():
            labels = [row[0] for row in g_samples]
            preds = [row[3 + model_idx] for row in g_samples]

            # Single ranking group per query.
            res = run_rank_evaluate(labels, preds, groups=[len(g_samples)])
            eval_result[query][model_idx] = res['nDCG@1']

    def print_samples(key):
        # Dump each model's top-3 ranked candidates for this query.
        g_samples = samples[key]
        for model_idx in range(3):
            ranked = sorted(
                ((row[3 + model_idx], row[:3]) for row in g_samples),
                key=lambda item: item[0],
                reverse=True,
            )
            print("=" * 100)
            print("model_{}".format(model_idx + 1))
            for score, head in ranked[:3]:
                print("{}\t{}\t{}\t{}".format(score, head[0], head[1], head[2]))

    def _model1_gap(item):
        # How strongly model 0 beats models 1 and 2, tie-broken by
        # model 1 vs model 2.
        scores = item[1]
        return (2 * scores[0] - scores[1] - scores[2]) * 2 + scores[1] - scores[2]

    sorted_results = sorted(eval_result.items(), key=_model1_gap, reverse=True)

    for query, res in sorted_results[:10]:
        print("\n")
        print(query, res)
        print_samples(query)


if __name__ == '__main__':
    # Other one-off analyses; uncomment to run:
    # analyze_statistics()
    # analyze_repos_lengths()
    # analyze_length()

    study_cases()
