# import Learn
import os
import random
import shutil
import numpy as np

# 对模型进行评估
# 包含precision, recall, f1


# The full universe of algorithm-problem tags the model can recommend from
# (Chinese competitive-programming topic labels, e.g. string, linked list,
# fast exponentiation, backtracking, DP, greedy, BFS/DFS, ...).
global_tags = {'字符串', '链表', '快速幂', '回溯', '矩阵快速幂', '逆元', '递归', '字典树', '队列', '哈希', '位运算', '动态规划', '堆', '双指针', '模拟', '数学', '后缀数组', '树', 'bfs', '穷举', '拓扑排序', '二分', '高级结构', '优先队列', '计算几何', '欧拉函数', '三分', '高级算法', '查找', '语法题', '分治', '思维', '图', '排序', '贪心', '前缀和', '搜索', 'dfs', '栈', '复杂度', '数组'}






# Move a random fraction of the (txt, lab) file pairs out of train/ and into
# predict/ so they can serve as the held-out evaluation set.
def splitFile(ratio: float):
    """Randomly move ``ratio`` of the training pairs from train/ to predict/.

    A pair is a problem text ``<id>.txt`` plus its label file ``<id>.lab``;
    both files of a sampled pair are moved together.
    """
    # Keep the basename including the trailing dot ("123." for "123.txt"),
    # so base + 'txt' / base + 'lab' rebuilds both file names.
    bases = [name[:-3] for name in os.listdir("train/") if name[-3:] == 'txt']
    chosen = random.sample(bases, int(len(bases) * ratio))
    for base in chosen:
        shutil.move("train/" + base + 'txt', "predict/")
        shutil.move("train/" + base + 'lab', "predict/")

# Undo splitFile: move every held-out file in predict/ back into train/.
def restore():
    """Return all files from predict/ to train/."""
    for name in os.listdir("predict/"):
        shutil.move("predict/" + name, "train/")

# Evaluate the model on the held-out files in predict/ (precision, recall, F1).
# threshold: minimum predicted score for a tag to be recommended.
def estimate(model, threshold: float):
    """Score ``model`` on every (txt, lab) pair under predict/.

    For each problem file the model's tag predictions are thresholded and
    compared against the ground-truth tags in the matching ``.lab`` file.
    Micro-averaged precision, recall and F1 are printed and returned.

    Returns:
        (precision, recall, f1) as floats; all 0.0 on empty input.
    """
    all_files = os.listdir("predict/")
    # Keep the basename including the trailing dot ("123." for "123.txt"),
    # so base + "txt" / base + "lab" rebuilds both file names.
    all_txt_files = [name[:-3] for name in all_files if name[-3:] == 'txt']

    common = 0   # true positives summed over all files
    preds = 0    # total number of recommended tags
    actuals = 0  # total number of ground-truth tags

    for base in all_txt_files:
        pred_all = model.predict_from_file("predict/" + base + "txt")

        # Recommended tags: every prediction scoring at least `threshold`.
        # NOTE(review): the original stopped at the first score below the
        # threshold, silently assuming pred_all is sorted by score descending;
        # filtering all entries is equivalent when sorted and correct when not.
        pred = {tag for tag, score in pred_all if score >= threshold}

        # Ground-truth tags: one per line in the .lab file.
        actual = set()
        with open("predict/" + base + "lab", 'r', encoding="utf-8") as f:
            for tag in f:
                actual.add(tag.strip())

        common += len(pred & actual)
        preds += len(pred)
        actuals += len(actual)

    # Standard micro-averaged definitions. (The original swapped precision
    # and recall, and dropped the factor 2 in F1.)
    #   precision = TP / recommended, recall = TP / actual, f1 = 2PR/(P+R)
    precision = common / preds if preds else 0.0
    recall = common / actuals if actuals else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0

    avg_len = preds / len(all_txt_files) if all_txt_files else 0.0
    print("平均推荐长度:{}".format(avg_len))
    print("common:{}".format(common))
    print("precision:{}".format(precision))
    print("recall:{}".format(recall))
    print("f1:{}".format(f1))

    return precision, recall, f1



################
# 计算baseline #
###############
# Scan the ground-truth label files under predict/ and report the largest
# number of tags any single problem carries (observed to be 5 on this data).
def getTagN():
    """Return the maximum tag-line count over all ``.lab`` files in predict/."""
    lab_files = [name for name in os.listdir("predict/") if name[-3:] == "lab"]

    max_n = 0
    for name in lab_files:
        with open("predict/" + name, 'r', encoding="utf8") as f:
            max_n = max(max_n, len(f.readlines()))

    print("最多的标签数量为:{}".format(max_n))
    return max_n


# Baseline: for every problem in predict/ recommend a uniformly random subset
# of 1..max_n tags drawn from global_tags, and score against the ground truth.
def getBaseLine(max_n: int):
    """Print and return (precision, recall, f1) for random tag recommendation.

    Args:
        max_n: upper bound on how many random tags to recommend per problem.
    """
    all_files = os.listdir("predict/")
    # Basenames keep the trailing dot so base + "lab" rebuilds the label file.
    all_txt_files = [name[:-3] for name in all_files if name[-3:] == 'txt']

    common = 0   # true positives summed over all files
    preds = 0    # total recommended tags
    actuals = 0  # total ground-truth tags

    # random.sample() requires a sequence — sampling from a set was deprecated
    # in 3.9 and raises TypeError on Python 3.11+ — so materialize the tag
    # population once (sorted for determinism under a fixed random seed).
    tag_population = sorted(global_tags)

    for base in all_txt_files:
        # "Predicted" tags: a random subset of size 1..max_n.
        k = random.randint(1, max_n)
        pred = set(random.sample(tag_population, k))

        # Ground-truth tags: one per line in the .lab file.
        actual = set()
        with open("predict/" + base + "lab", 'r', encoding="utf-8") as f:
            for tag in f:
                actual.add(tag.strip())

        common += len(pred & actual)
        preds += len(pred)
        actuals += len(actual)

    # Standard definitions. (The original swapped precision and recall, and
    # dropped the factor 2 in F1.)
    #   precision = TP / recommended, recall = TP / actual, f1 = 2PR/(P+R)
    precision = common / preds if preds else 0.0
    recall = common / actuals if actuals else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0

    print("common:{}".format(common))
    print("precision:{}".format(precision))
    print("recall:{}".format(recall))
    print("f1:{}".format(f1))

    return precision, recall, f1


if __name__ == '__main__':
    ### 1. Take a sample of files out of train/ and move them to predict/
    ###    to serve as the held-out evaluation set.
    # splitFile(0.1)

    ### 2. Restore: move all held-out files back into train/.
    # restore()

    ### 3. Obtain the model (train-and-save, or load a previously saved one).
    ### NOTE(review): needs the Learn import, commented out at the top of file.
    # model = Learn.trainNSave("train/")
    # model = Learn.load()


    #############################
    # 4. Evaluation metrics at different score thresholds.
    # Recorded output of one run (threshold 0.002):
    # <<<<<<<<<<<<<<<<<<<<
    # 0.002
    # common:177
    # precision:0.8009049773755657
    # recall:0.052366863905325446
    # f1:0.049153013051930025
    # >>>>>>>>>>>>>>>>>>>>
    #
    #############################
    # ps = []
    # rs = []
    # f1s = []
    # for i in range(3,10):
    #     print("<"*20)
    #
    #     ii = i*0.01
    #     print(ii)
    #
    #     p,r,f1 = estimate(model,ii)
    #
    #     ps.append(p)
    #     rs.append(r)
    #     f1s.append(f1)
    #
    #     print(">"*20)
    #
    # print("argmax precision:{}".format(np.array(ps).argmax()))
    # print("max precision:{}".format(np.array(ps).max(initial=0)))
    #
    # print("argmax f1:{}".format(np.array(f1s).argmax()))
    # print("max f1:{}".format(np.array(f1s).max(initial=0)))
    #
    # print(ps)
    # print(rs)
    # print(f1s)




    #######################
    # 5. Random-recommendation baseline.
    # Recorded result: precision ~0.05, f1 ~0.02.
    #######################
    # max_n = getTagN()   # largest ground-truth tag count (observed: 5)

    # Sweep the number of randomly recommended tags from 10 down to 5
    # and print the baseline metrics for each setting.
    for i in range(10,4,-1):
        print("<"*20)
        print("平均推荐长度:{}".format(i))
        getBaseLine(i)
        print(">"*20)


