import os
import logging
import json
import numpy as np
from api import utils, models
from api.serial import mould, sample
from api.serial.detect import detect_sample
from detecter import settings

from api.serial.cluster import NegativeAnalyzer
from detecter.settings import Collection_frequency

logger = logging.getLogger('requestHandler')


# Feed back a misdetected sample to update the decision threshold.
# Mislabelled negative labels are not corrected here, and unassigned_file is
# left untouched for now.  (by zxy on 20200103)
def feedback_to_update_threshold(db_mould, current_mould, sample_id, sample_type):
    """Update ``db_mould.threshold`` from feedback on one misjudged sample.

    Parameters:
        db_mould: Mould model instance whose threshold, positive/negative
            counters and persisted state are updated.
        current_mould: in-memory mould exposing good_file / sample_path /
            rejects_file paths.
        sample_id: id of the misjudged sample.
        sample_type: "positive" when a good part was rejected (threshold is
            raised), "negative" when a bad part was accepted (threshold is
            lowered); any other value is rejected as invalid.

    Returns:
        utils.R.ok() on success, utils.R.error(...) when the sample is not
        found or ``sample_type`` is invalid.
    """
    logger.info("进行反馈中...")
    good_file, sample_path, rejects_file = current_mould.good_file, \
                                           current_mould.sample_path, current_mould.rejects_file
    try:
        sample_file = models.Sample.objects.get(sample_id=sample_id, mould=db_mould)
    except models.Sample.DoesNotExist:
        # Narrowed from a bare except: only "row missing" should turn into
        # the not-found response; other DB errors must propagate.
        return utils.R.error('该样本未找到')
    sample_times = sample_file.times
    origin_threshold = db_mould.threshold
    new_threshold = origin_threshold
    sample_file.state = 3  # state 3 = feedback recorded
    # Signed distance between the sample's score and the current threshold.
    difference = float(sample_times) - origin_threshold

    logger.info(str(sample_type))
    logger.info(str(difference))
    # A good part was detected as a reject: raise the threshold.
    if sample_type == "positive":
        logger.info("flag 0")
        if difference > 0:
            new_threshold = origin_threshold + difference * 0.5
        else:
            new_threshold = origin_threshold
        logger.error(f'{sample_id} 系统将品质正常误判成不良品')
        # Move the sample record back from the rejects file to the good file.
        utils.restore_sample(rejects_file, good_file, sample_id)
        db_mould.pos_increment()  # positive count + 1
        db_mould.neg_reduce()  # negative count - 1
        sample_file.label_id = 2

    # A bad part was detected as good: lower the threshold.
    elif sample_type == "negative":
        if difference < 0:
            new_threshold = origin_threshold + difference * 1.1
        else:
            new_threshold = origin_threshold
        logger.info(f'{sample_id},不良品误判成品质正常')
        utils.restore_sample(good_file, rejects_file, sample_id)
        db_mould.neg_increment()  # negative count + 1
        db_mould.pos_reduce()  # positive count - 1
        sample_file.label_id = 1
    else:
        return utils.R.error(f'id为{sample_id}的样本不符合负反馈规则，请检查该样本是否需要误判反馈')

    # Never let the threshold fall below the floor of 3.0.
    if new_threshold < 3.0:
        new_threshold = 3.0
    db_mould.threshold = new_threshold
    db_mould.save()

    sample_file.save(force_update=True)
    logger.info("flag 1")

    return utils.R.ok()


# Negative-sample clustering handler, by zxy on 20200108
# 1. {phenomenon 1 {cause 1, cause 2, ...}, phenomenon 2 {cause 1, ...}, ...}
#    i.e. at most 20 (phenomena) * 20 (causes) = 400 figures are needed.
# 2. The NegFigure table structure becomes {id, mould_code, label_id
#    (phenomenon), figure_reason3, count_reason3, figure_reason4,
#    count_reason4, ...}.
# 3. negatives awaiting clustering = total negatives - negatives already
#    stored in the figure files (label1{reason1+reason2+..});
#    whether to refresh the negative figures is also decided in this step.
# 4. The wrapped response carries the current decision threshold, the number
#    of negatives awaiting clustering and the negative likelihoods (an empty
#    array when unavailable).
# 5. The clustering algorithm outputs several arrays of sample ids, which a
#    dedicated handler then packages for the frontend.
# 6. Also serves requests for one negative sample's production info
#    (location / pressure / speed curves).

# Part one: figures do not yet exist for every label.
#    The consumer tracks how many negatives await clustering and runs the
#    algorithm once enough accumulate (manually triggered for now); the
#    response needs the current threshold, the pending count and a flag (0)
#    saying whether cause likelihoods can be shown.

# Two modes: 1. return the clustering result (sample_id arrays) plus the
#               cluster-centre curves
#            2. the request carries mode 1's result (sample_id arrays) so the
#               assignment can be persisted
def assign_cluster(db_mould, mode, data):
    """Cluster unassigned negative samples, or persist a cluster labelling.

    Parameters:
        db_mould: Mould model instance (supplies mould_code and DB rows).
        mode: "image" to run DBSCAN and return clusters with averaged centre
              curves; "assign" to move clustered samples into their
              label/reason csv files and update Sample / NegFigure rows.
        data: for "assign", a dict shaped like the example below; unused for
              "image".

    Returns:
        utils.R.ok(...) with a JSON payload ("image"), a confirmation string
        ("assign"), or utils.R.error(...) for missing files / unknown mode.
    """
    # data = {
    #     "cluster_result": [
    #         {"cluster_id": 1, "label_id": 3, "reason_id": 9, "cluster_list": [1, 2]},
    #         {"cluster_id": 2, "label_id": 4, "reason_id": 10, "cluster_list": [3]},
    #         {"cluster_id": 3, "label_id": 12, "reason_id": 3, "cluster_list": [4, 5, 6]}
    #     ],
    #     "time": ""
    # }
    unassigned_file = os.path.join(settings.BASE_DIR, 'data', 'sample', db_mould.mould_code,
                                   'temp', 'unassigned.csv')
    rejects_file = os.path.join(settings.BASE_DIR, 'data', 'sample', db_mould.mould_code,
                                'rejects.csv')
    if not os.path.exists(unassigned_file) or not os.path.exists(rejects_file):
        return utils.R.error("路径不存在，请重新初始化")

    if mode == "image":
        # Raw samples collected while scanning the file.
        sample_list = []
        period_len_list = []
        # sample id -> fixed-length feature vector derived from sample_list.
        point_dict = {}
        with open(unassigned_file, 'r') as reader:
            json_list = reader.readlines()
            sample_no = 0
            # As in figure modelling, the dimensionality of the shortest
            # period is the common baseline.
            for json_str in json_list:
                json_obj = json.loads(json_str)
                period_len_list.append(min(len(json_obj['actual_pressure']),
                                           len(json_obj['actual_location']),
                                           len(json_obj['actual_speed'])))
                sample_no = int(json_obj['id'])
                # ProductSample supports being restored from file contents
                # (is_load=True) with later processing in mind.
                sample_file = sample.ProductSample(sample_no, frame_list=period_len_list,
                                                   is_load=True, json_obj=json_obj)
                sample_list.append(sample_file)
        N = np.min(period_len_list)
        sample_index = np.array(list(range(0, N, Collection_frequency))).astype('int32')
        point_feature = []
        for sample_file in sample_list:
            info = sample_file.get_userful_info_dic()
            sample_no = info['id']
            ch1_arr = np.array(info['actual_pressure'])
            ch2_arr = np.array(info['actual_location'])
            speed_arr = np.array(info['actual_speed'])

            # Subsample each channel to a common length and stack them into
            # one feature vector per sample.
            feature = np.concatenate([ch1_arr[sample_index], ch2_arr[sample_index], speed_arr[sample_index]])
            point_dict.update({sample_no: feature})
            point_feature.append(feature)

        point_feature = np.array(point_feature)

        # Parameter tuning, chiefly for Eps: start from all pairwise
        # Euclidean distances between the sample feature vectors.
        MinPts = settings.DBSCAN_MINPts
        distance_list = []
        for P in point_dict.keys():
            for Pn in point_dict.keys():
                if P > Pn:
                    distance_list.append(np.linalg.norm(point_dict[P] - point_dict[Pn]))
        distance_list.sort()

        # distance_list holds pairwise distances only (no self-distances).
        # Candidate Eps values are taken at the 10%..90% deciles.
        Eps_list = []
        Cf_list = []
        for i in range(1, 10):
            index = len(distance_list) / 10 * i
            if index - int(index) == 0:
                # The decile falls exactly on one element.
                Eps_list.append(distance_list[int(index)])
            else:
                # e.g. 20.5: average the distances at positions 20 and 21.
                # Clamp the upper position so that very small distance lists
                # (e.g. a single pair) cannot index past the end.
                upper = min(int(index) + 1, len(distance_list) - 1)
                Eps_list.append((distance_list[upper] + distance_list[int(index)]) / 2)

        for eps in Eps_list:
            # point_dict plays the role of "D" inside NegativeAnalyzer.
            negAnalyzer = NegativeAnalyzer(point_dict)
            isMultipleCluster = negAnalyzer.MyDBSCAN(eps, MinPts)
            negAnalyzer.updateIn()
            if isMultipleCluster:
                # Two or more clusters: evaluate compactness.
                negAnalyzer.updateCluster()
                R = negAnalyzer.PRank(settings.PRANK_LAMDA, settings.PRANK_C, settings.PRANK_K)
                cf = negAnalyzer.evaluate(R)
                Cf_list.append(cf)
            else:
                # Fewer than two clusters: compactness is meaningless; mark
                # the run with 0 so selection below skips it.
                Cf_list.append(0)

        logger.info("10%-90% Cf的结果")
        logger.info(str(Cf_list))
        # Choose the Eps with the smallest strictly-positive Cf.  (The old
        # code seeded minCf with Cf_list[0]; when that entry was 0 the test
        # `minCf > cf > 0` could never succeed and index 0 was kept even
        # though valid candidates existed.)  When no Cf is positive we fall
        # back to index 0, as before.
        index = 0
        minCf = float('inf')
        for i, cf in enumerate(Cf_list):
            if 0 < cf < minCf:
                minCf = cf
                index = i
        negAnalyzer = NegativeAnalyzer(point_dict)
        negAnalyzer.MyDBSCAN(Eps_list[index], MinPts)
        negAnalyzer.updateIn()
        negAnalyzer.updateCluster()
        logger.info(negAnalyzer.cluster)

        # Fall back to one cluster holding every sample when DBSCAN found
        # no clusters at all.
        if negAnalyzer.cluster:
            respond_cluster = negAnalyzer.cluster
        else:
            respond_cluster = {1: list(negAnalyzer.D.keys())}
        logger.info(respond_cluster)

        # Package the response: per cluster, the member ids plus the averaged
        # pressure/location/speed curves as the "cluster centre" image.
        cluster_result = []
        sample_list = []
        period_len_list = []
        for cluster_id in respond_cluster.keys():
            cluster_list = respond_cluster[cluster_id]
            with open(rejects_file, 'r') as reader:
                json_list = reader.readlines()
                for json_str in json_list:
                    json_obj = json.loads(json_str)
                    sample_no = int(json_obj['id'])
                    if sample_no in cluster_list:
                        period_len_list.append(min(len(json_obj['actual_pressure']),
                                                   len(json_obj['actual_location']),
                                                   len(json_obj['actual_speed'])))
                        sample_file = sample.ProductSample(sample_no, frame_list=period_len_list,
                                                           is_load=True, json_obj=json_obj)
                        sample_list.append(sample_file)
            # NOTE(review): period_len_list accumulates across clusters, so N
            # can only shrink; indexing stays safe, but clusters after the
            # first are subsampled with the global minimum — confirm intended.
            N = np.min(period_len_list)
            sample_index = np.array(list(range(0, N, Collection_frequency)))
            ch1_arr = np.zeros(N)
            ch2_arr = np.zeros(N)
            speed_arr = np.zeros(N)
            for sample_file in sample_list:
                info = sample_file.get_userful_info_dic()
                ch1_arr += np.array(info['actual_pressure'])[sample_index]
                ch2_arr += np.array(info['actual_location'])[sample_index]
                speed_arr += np.array(info['actual_speed'])[sample_index]
            ch1_arr = ch1_arr / len(sample_list)
            ch2_arr = ch2_arr / len(sample_list)
            speed_arr = speed_arr / len(sample_list)
            sample_list = []
            cluster_result.append({
                "cluster_id": cluster_id,
                "cluster_list": cluster_list,
                "cluster_pressure": ch1_arr.tolist(),
                "cluster_location": ch2_arr.tolist(),
                "cluster_speed": speed_arr.tolist()
            })

        result = {}
        result.update({"cluster_result": list(cluster_result)})
        return utils.R.ok(json.dumps(result))

    elif mode == "assign":
        for cluster in data["cluster_result"]:
            label_name = models.Label.objects.get(id=cluster["label_id"]).label_name
            reason_name = models.Reason.objects.get(id=cluster["reason_id"]).reason_name

            file_path = os.path.join(settings.BASE_DIR, 'data', 'sample', db_mould.mould_code,
                                     "负样本信息", label_name)
            file_name = os.path.join(file_path, str(reason_name + ".csv"))
            if not os.path.exists(file_path):
                os.makedirs(file_path)
            if not os.path.exists(file_name):
                # Create an empty csv for this label/reason pair.
                with open(file_name, mode='w'):
                    pass
            for sample_id in list(cluster["cluster_list"]):
                utils.restore_sample(unassigned_file, file_name, sample_id)

                # The Sample row must be updated too (zxy, 20200114).
                sample_file = models.Sample.objects.get(mould=db_mould, sample_id=sample_id)
                sample_file.label_id = cluster["label_id"]
                sample_file.reason_id = cluster["reason_id"]
                sample_file.save()
            # get-or-create the NegFigure row for this label, then bump the
            # per-reason counter by the cluster size.  Narrowed from a bare
            # except so only "row missing" triggers creation.
            try:
                neg_figure = models.NegFigure.objects.get(mould=db_mould, label_id=cluster["label_id"])
            except models.NegFigure.DoesNotExist:
                neg_figure = models.NegFigure.objects.create(mould=db_mould, label_id=cluster["label_id"])
            count = neg_figure.get_count(cluster["reason_id"])
            if not count:
                count = 0
            neg_figure.set_count(cluster["reason_id"],
                                 count + len(list(cluster["cluster_list"])))
            neg_figure.save()

        return utils.R.ok("已给聚类分配好标签")
    else:
        return utils.R.error("无效请求，请检查请求参数")


# zxy, modified 20200111
# Two modes: 1. return the sample's actual location/speed/pressure curves for
#               display
#            2. assign a label to the sample
def assign_sample(db_mould, mode, data):
    """Serve one negative sample: curve data + predicted cause, or labelling.

    Parameters:
        db_mould: Mould model instance (supplies mould_code and DB rows).
        mode: "image" to return the sample's subsampled pressure / location /
              speed curves plus the most likely label and up to three cause
              likelihoods computed against the stored negative figures;
              "assign" to move the sample into its label/reason csv, update
              NegFigure counters (decrementing the previous label/reason when
              this is a re-label) and the Sample row.
        data: dict carrying data["sample_result"]["sample_id"] and, for
              "assign", "label_id" and "reason_id" (examples below).

    Returns:
        a utils.R response; errors when files are missing, the sample is
        absent, or ``mode`` is unknown.
    """
    # data = {
    #     "sample_result":
    #         {"sample_id": 125, "label_id": 11, "reason_id": 3}
    #     ,
    #     "time": ""
    # }
    # data = {
    #     "sample_result":
    #         {"sample_id": 125}
    #     ,
    #     "time": ""
    # }
    unassigned_file = os.path.join(settings.BASE_DIR, 'data', 'sample', db_mould.mould_code,
                                   'temp', 'unassigned.csv')
    rejects_file = os.path.join(settings.BASE_DIR, 'data', 'sample', db_mould.mould_code,
                                'rejects.csv')
    if not os.path.exists(unassigned_file) or not os.path.exists(rejects_file):
        return utils.R.error("路径不存在，请检查路径是否完整")
    sample_id = data["sample_result"]["sample_id"]

    if mode == "image":
        sample_file, period_len_list = utils.get_sample(rejects_file, sample_id)
        if not sample_file:
            return utils.R.error("未找到该样本")
        N = np.min(period_len_list)
        sample_index = np.array(list(range(0, N, Collection_frequency)))
        ch1_arr = np.zeros(N)
        ch2_arr = np.zeros(N)
        speed_arr = np.zeros(N)
        info = sample_file.get_userful_info_dic()
        # NOTE(review): zeros(N) += arr[sample_index] only broadcasts when
        # len(sample_index) == N, i.e. Collection_frequency == 1 — confirm
        # against detecter.settings.
        ch1_arr += np.array(info['actual_pressure'])[sample_index]
        ch2_arr += np.array(info['actual_location'])[sample_index]
        speed_arr += np.array(info['actual_speed'])[sample_index]

        # Score the sample against every stored negative figure, tracking the
        # label that owns the highest likelihood.
        result_label_name = None
        max_detect_result = 0
        detect_result_dict = {}
        negFigure_list = models.NegFigure.objects.filter(mould=db_mould)
        flag_no_figure = True
        for negFigure in negFigure_list:
            label_name = models.Label.objects.get(id=negFigure.label_id).label_name
            detect_result_dict.update({label_name: {}})
            # Reason ids occupy the fixed range 3..22 (20 causes).
            for i in range(3, 23):
                figure_reason = negFigure.get_figure_by_reason(i)
                if figure_reason.length == "":
                    continue
                flag_no_figure = False
                reason_name = models.Reason.objects.get(id=i).reason_name
                detect_result = detect_sample(sample_file, figure_reason, isPostive=False)
                detect_result_dict[label_name].update({reason_name: detect_result})
                if detect_result > max_detect_result:
                    max_detect_result = detect_result
                    result_label_name = label_name

        # Cause likelihoods are only usable when at least one figure matched
        # AND some likelihood exceeded 0; otherwise result_label_name is
        # still None and indexing detect_result_dict with it would raise
        # KeyError (the original code did exactly that).
        if not flag_no_figure and result_label_name is not None:
            result_reason_dict = detect_result_dict[result_label_name]
            logger.info("label: " + str(result_label_name))
            # Keep only the three most likely causes.
            while len(result_reason_dict) > 3:
                result_reason_dict.pop(min(result_reason_dict, key=result_reason_dict.get))
        else:
            result_label_name = "缺少负样本画像，暂时无法预判"
            result_reason_dict = {}
        # Package the response.
        sample_result = {
            "sample_id": sample_id,
            "sample_pressure": ch1_arr.tolist(),
            "sample_position": ch2_arr.tolist(),
            "sample_speed": speed_arr.tolist(),
            "sample_label": result_label_name,
            "sample_reason": result_reason_dict
        }
        logger.info("label: " + str(result_label_name))
        logger.info(str(result_reason_dict))
        result = {}
        result.update({"sample_result": sample_result})
        return utils.R.ok(json.dumps(result))

    elif mode == "assign":
        new_label_id = data["sample_result"]["label_id"]
        new_reason_id = data["sample_result"]["reason_id"]
        new_label_name = models.Label.objects.get(id=new_label_id).label_name
        new_reason_name = models.Reason.objects.get(id=new_reason_id).reason_name

        # Fetch the Sample once (the original issued three identical
        # queries) and remember its previous assignment.
        sample_file = models.Sample.objects.get(mould=db_mould, sample_id=sample_id)
        origin_label_id = sample_file.label_id
        origin_reason_id = sample_file.reason_id

        file_path = os.path.join(settings.BASE_DIR, 'data', 'sample', db_mould.mould_code,
                                 "负样本信息", new_label_name)
        file_name = os.path.join(file_path, str(new_reason_name + ".csv"))
        if not os.path.exists(file_path):
            os.makedirs(file_path)
        if not os.path.exists(file_name):
            # Create an empty csv for this label/reason pair.
            with open(file_name, mode='w'):
                pass
        utils.restore_sample(unassigned_file, file_name, sample_id)

        # get-or-create the NegFigure row for the new label, then bump its
        # per-reason counter.  Narrowed from a bare except so only "row
        # missing" triggers creation.
        try:
            neg_figure = models.NegFigure.objects.get(mould=db_mould, label_id=new_label_id)
        except models.NegFigure.DoesNotExist:
            neg_figure = models.NegFigure.objects.create(mould=db_mould, label_id=new_label_id)

        count = neg_figure.get_count(new_reason_id)
        if not count:
            count = 0
        neg_figure.set_count(new_reason_id, count + 1)
        neg_figure.save()

        # This may be a re-label; decrement the previous label/reason
        # counter (zxy, 20200213).
        if origin_label_id >= 3 and origin_reason_id >= 3:
            try:
                origin_neg_figure = models.NegFigure.objects.get(mould=db_mould, label_id=origin_label_id)
                origin_neg_figure.set_count(origin_reason_id,
                                            origin_neg_figure.get_count(origin_reason_id) - 1)
                origin_neg_figure.save()
            except Exception as e:
                # Best-effort: an incomplete DB row is logged, not fatal.
                logger.info(str(e))
                logger.info(f"数据库信息不完整, label_id{origin_label_id}, reason_id{origin_reason_id}")

        # The Sample row must be updated too (zxy, 20200114).
        sample_file.label_id = new_label_id
        sample_file.reason_id = new_reason_id
        sample_file.save()

        logger.info(f"已给样本id{sample_id}分配好标签, 标签: {new_label_name}, 原因: {new_reason_name}")
        return utils.R.ok("已给样本分配好标签")
    else:
        return utils.R.error("无效请求，请检查请求参数")
