import math
from collections import Counter

import numpy as np
from scipy.spatial.distance import pdist

from dataset.dataset import DataSet


class DataAnalyzer:
    """Static helpers for analyzing object-detection dataset statistics.

    Provides per-class sample counts/ratios for a dataset directory and a
    cosine-similarity consistency score between the class distributions of
    several datasets (e.g. train/val/test splits).
    """

    @staticmethod
    def __get_aspect_ratios(classnames, boundingboxes):
        """Pair every object's class name with its bounding-box aspect ratio.

        :param classnames: per-image lists of class names for the whole dataset
        :param boundingboxes: per-image lists of bounding boxes (each exposing
            ``get_aspect_ratio()``), parallel to ``classnames``
        :return: flat list of ``[class_name, aspect_ratio]`` pairs
        """
        aspect_ratios = []
        for image_classes, image_boxes in zip(classnames, boundingboxes):
            for cls, box in zip(image_classes, image_boxes):
                aspect_ratios.append([cls, box.get_aspect_ratio()])
        return aspect_ratios

    @staticmethod
    def __filtrate_data(data, classnames):
        """Keep only the ``[class_name, value]`` entries matching ``classnames``.

        :param data: list of ``[class_name, value]`` pairs to filter
        :param classnames: class name(s) used as the filter
        :return: new filtered list
        """
        # NOTE(review): ``classnames in item[0]`` is a substring test when
        # ``classnames`` is a single string, and raises TypeError for a list;
        # ``item[0] in classnames`` may have been intended — confirm with callers.
        new_data = []
        for item in data:
            if classnames in item[0]:
                new_data.append(item)
        return new_data

    @staticmethod
    def __get_scale(classnames, scales):
        """Pair each class-name entry with its scale.

        :param classnames: class names for the dataset
        :param scales: scales, parallel to ``classnames``
        :return: list of ``[classname, scale]`` pairs
        """
        return [[classname, scale] for classname, scale in zip(classnames, scales)]

    @staticmethod
    def __deal_data(data, max_value, min_value, bins):
        """Compute the fraction of ``data`` falling into each equal-width bucket.

        Values are clamped into ``[min_value, max_value]`` first (``data`` is
        mutated in place by the clamping).

        :param data: list of numbers to analyze (modified in place)
        :param max_value: upper clamp bound
        :param min_value: lower clamp bound
        :param bins: number of equal-width buckets
        :return: dict mapping each bucket's lower edge to the fraction of
            samples strictly inside that bucket
        """
        # NOTE(review): the strict ``<`` comparisons drop values landing
        # exactly on a bucket edge (including everything clamped to min/max),
        # the loop produces ``bins + 1`` buckets, and empty ``data`` raises
        # ZeroDivisionError — confirm whether these are intended.
        for i in range(len(data)):
            if data[i] >= max_value:
                data[i] = max_value
            if data[i] <= min_value:
                data[i] = min_value
        length = (max_value - min_value) / bins
        data_distribution = {}
        for i in range(bins + 1):
            data_distribution_s = []
            for j in range(len(data)):
                if min_value + length * i < data[j] < min_value + length * (i + 1):
                    data_distribution_s.append(data[j])
            data_distribution[min_value + length * i] = len(data_distribution_s) / len(data)
        return data_distribution

    @staticmethod
    def analyze_sample_distribution(dataset_dir, class_list):
        """Count how often each class in ``class_list`` occurs in a dataset.

        :param dataset_dir: dataset directory understood by ``DataSet``
        :param class_list: class names to analyze
        :return: dict mapping class name to ``{'num': count, 'ratio': share}``
            where ``share`` is the count divided by the total over ``class_list``
        :raises ValueError: when none of the requested classes occur
        """
        dataset = DataSet(dataset_dir)
        names = []
        for image_classes in dataset.get_classnames():
            names.extend(image_classes)
        # One O(n) counting pass instead of rescanning `names` per class.
        counts = Counter(names)
        class_num = [[cls, counts.get(cls, 0)] for cls in class_list]
        all_num = sum(cnt for _, cnt in class_num)
        if all_num == 0:
            # Bug fix: the original built ``ValueError(print(...))`` without
            # raising it, then silently returned an empty dict.
            raise ValueError('数据集没有当前类别')
        return {cls: {'num': cnt, 'ratio': cnt / all_num} for cls, cnt in class_num}

    @staticmethod
    def analyze_data(dataset_dict, class_list):
        """Analyze several datasets and score their distribution consistency.

        :param dataset_dict: mapping of dataset name to dataset directory
        :param class_list: class names to analyze in every dataset
        :return: ``{'distribution': {name: per-class stats},
            'consistency': {'ref-other': cosine similarity}}``
        """
        result = {'distribution': {}, 'consistency': {}}
        for name, directory in dataset_dict.items():
            result['distribution'][name] = DataAnalyzer.analyze_sample_distribution(directory, class_list)
        result['consistency'] = DataAnalyzer.analyze_target_consistency(result)
        return result

    @staticmethod
    def __vectorcosine(x, y):
        """Cosine of the angle between consecutive segments of a polyline.

        For each interior point ``i`` the cosine between segments
        ``(i-1 -> i)`` and ``(i -> i+1)`` is computed.

        :param x: x coordinates
        :param y: y coordinates, parallel to ``x``
        :return: list of cosines (empty for fewer than 4 points)
        """
        # NOTE(review): the loop stops at ``len(x) - 3`` so the last interior
        # point is skipped; ``range(1, len(x) - 1)`` may have been intended.
        # Debug ``print`` of every cosine removed (leaked to stdout).
        vc = []
        for i in range(1, len(x) - 2):
            dx1 = x[i] - x[i - 1]
            dx2 = x[i + 1] - x[i]
            dy1 = y[i] - y[i - 1]
            dy2 = y[i + 1] - y[i]
            vc.append((dx1 * dx2 + dy1 * dy2)
                      / (math.sqrt(dx1 ** 2 + dy1 ** 2) * math.sqrt(dx2 ** 2 + dy2 ** 2)))
        return vc

    @staticmethod
    def analyze_target_consistency(sample_distribution):
        """Compare the class-ratio distributions of several datasets.

        The first dataset is the reference; each other dataset is compared to
        it via cosine similarity of the class-ratio vectors.

        :param sample_distribution: ``{'distribution': {dataset_name:
            {class_name: {'num': ..., 'ratio': ...}}}}`` as produced by
            :meth:`analyze_data`
        :return: dict mapping ``'ref_name-other_name'`` to cosine similarity
        """
        # Collapse each dataset to {class_name: ratio}.
        samples = {}
        for dataset_name, per_class in sample_distribution['distribution'].items():
            samples[dataset_name] = {cls: stats['ratio'] for cls, stats in per_class.items()}
        consistency = {}
        samples_keys = list(samples.keys())
        samples_values = list(samples.values())
        for num in range(1, len(samples_values)):
            dataset_1 = samples_values[0]
            dataset_2 = samples_values[num]
            dataset_1_name = list(dataset_1.keys())
            dataset_1_ratio = list(dataset_1.values())
            dataset_2_name = list(dataset_2.keys())
            dataset_2_ratio = list(dataset_2.values())
            # Pad with zeros for classes present in only one of the datasets.
            # NOTE(review): this relies on both datasets listing classes in
            # the same order (true when produced by analyze_data with a shared
            # class_list); arbitrary key orders would misalign the vectors.
            for i in range(len(dataset_1_name)):
                if dataset_1_name[i] not in dataset_2_name:
                    dataset_1_ratio.insert(i, 0.0)
            for i in range(len(dataset_2_name)):
                if dataset_2_name[i] not in dataset_1_name:
                    dataset_2_ratio.insert(i, 0.0)
            stacked = np.vstack([np.array(dataset_1_ratio), np.array(dataset_2_ratio)])
            # pdist 'cosine' returns the cosine *distance*; similarity = 1 - d.
            similarity = float(1 - pdist(stacked, 'cosine')[0])
            consistency[samples_keys[0] + '-' + samples_keys[num]] = similarity
        return consistency


def main():
    """Ad-hoc driver: read class names from a file and analyze local datasets.

    Reads one class name per line from a hard-coded path, then prints the
    per-class distribution of one dataset and the train/val/test consistency
    report for another.
    """
    # `with` guarantees the file handle is closed (original leaked it).
    with open(r"D:\nco.txt", 'r') as file:
        nco = [line.strip() for line in file]
    print(nco)
    # Bug fix: these are static methods of DataAnalyzer, not module-level
    # names — the bare calls raised NameError.
    a = DataAnalyzer.analyze_sample_distribution(r'D:\test', ['gerber22', 'gerber21'])
    print(a)
    b = DataAnalyzer.analyze_data({'train': r'D:\quechaoshuju\nco\train', 'evaluate': r'D:\quechaoshuju\nco\val',
                                   'test': r'D:\quechaoshuju\nco\test'}, nco)
    print(b)


if __name__ == "__main__":
    main()