import random
from copy import deepcopy
from typing import List
from Clustering import Clustering
from distance import euclid_dis

def sequential(sp: List[List], th: float, M: int, get_dis=euclid_dis) -> List[Clustering]:
    '''
    Sequential (one-pass) cluster analysis.

    Input: sample set, distance threshold, maximum cluster count,
    distance function (Euclidean by default).
    Returns the list of resulting clusters.
    A sample's distance to a cluster is measured to the cluster center.
    '''
    clu: List[Clustering] = []
    for s in sp:
        # The very first sample always starts its own cluster.
        if not clu:
            clu.append(Clustering().add(s).calc_m())
            print(f"第{len(clu)}个聚类产生: ", clu[-1])
            continue

        # Find the nearest existing cluster (by center distance).
        min_dis, idx = float('inf'), 0
        for i, c in enumerate(clu):
            dis = get_dis(c.get_m(), s)
            if dis < min_dis:
                min_dis, idx = dis, i

        # Start a new cluster only when the threshold is exceeded
        # AND the cluster cap M has not been reached yet.
        if min_dis > th and len(clu) < M:
            clu.append(Clustering().add(s).calc_m())
            print(f"本轮最短距离为{min_dis}")
            print(f"第{len(clu)}个聚类产生: ", clu[-1])
        # Otherwise the sample joins the nearest cluster.
        else:
            clu[idx].add(s).calc_m()
            # BUG FIX: the old message unconditionally claimed the threshold
            # was not exceeded, which is wrong when this branch is reached
            # only because the cluster count already hit the cap M.
            if min_dis <= th:
                print(f"本轮最短距离为{min_dis},未超过阈值{th}")
            else:
                print(f"本轮最短距离为{min_dis},超过阈值{th},但聚类数已达上限{M}")
            print(f"第{idx}个聚类添加了样本: ", s, "得到 ", clu[idx])

    return clu
        

def hierarchical(sp: List[List], th: int, M: int, get_dis=euclid_dis) -> List[Clustering]:
    '''
    Hierarchical (agglomerative) cluster analysis.

    Input: sample set, distance threshold, target cluster count,
    distance function (Euclidean by default).
    Cluster-to-cluster distance is the distance between cluster means.
    Returns the final cluster list.
    '''
    # The target cluster count must be smaller than the sample count.
    if M > len(sp):
        raise ValueError("M must be less than the number of samples!")

    # Start with every sample in its own singleton cluster.
    clusters = [Clustering().add(sample).calc_m() for sample in sp]

    # Merge until the target count or the distance threshold stops us.
    while True:
        if len(clusters) == M:
            print(f"当前轮聚类数达到预定类别数{M}")
            break

        # Locate the closest pair of distinct clusters this round.
        best, best_a, best_b = float('inf'), 0, 0
        for a, ca in enumerate(clusters):
            for b, cb in enumerate(clusters):
                if a == b:
                    continue
                d = get_dis(ca.get_m(), cb.get_m())
                if d < best:
                    best, best_a, best_b = d, a, b

        # Stop once even the closest pair is farther apart than th.
        if best > th:
            print(f"当前轮最短类间距离为{best}, 大于阈值{th}")
            break

        # Merge the closest pair into one cluster and refresh its center.
        print(f"当前轮最短类间距离为{best}, 小于阈值{th}")
        print(f"聚类: {clusters[best_a]} 与聚类: {clusters[best_b]} 合并")
        clusters[best_a].merge(clusters[best_b]).calc_m()
        print(f"合并后结果: {clusters}")
        clusters.pop(best_b)

    return clusters


def k_means(sp: List[List], clu: List[Clustering], get_dis=euclid_dis) -> List[Clustering]:
    '''
    K-means cluster analysis.

    Input: sample set, initial cluster list, distance function
    (Euclidean by default).
    Returns the converged cluster list.
    A sample's distance to a cluster is measured to the cluster center.
    '''
    # Copy so the caller's initial clustering is never mutated.
    clu = deepcopy(clu)
    while True:
        # Fresh empty clusters to receive this round's assignments.
        new_clu = [Clustering() for _ in range(len(clu))]

        # Assign every sample to its nearest cluster center.
        for s in sp:
            min_dis, idx = float('inf'), 0
            for i, c in enumerate(clu):
                dis = get_dis(s, c.get_m())
                if dis < min_dis:
                    min_dis, idx = dis, i
            # NOTE(review): a cluster that receives no samples stays empty;
            # whether get_m() on it is valid next round depends on
            # Clustering's implementation — confirm.
            new_clu[idx].add(s).calc_m()

        # Converged when every cluster is unchanged from last round.
        for i in range(len(clu)):
            if not clu[i].is_equal(new_clu[i]):
                # new_clu is rebuilt from scratch each iteration, so a plain
                # rebind suffices — the old per-round deepcopy was wasted work.
                clu = new_clu
                break
        else:
            return new_clu


def random_clu(samples: List[List], K: int) -> List[Clustering]:
    '''
    Pick K random samples from the sample set, each becoming a
    single-sample cluster.
    Returns the cluster list.
    '''
    # BUG FIX: calc_m() was missing, so the returned clusters had no
    # computed center even though k_means (the intended consumer) calls
    # get_m() on them immediately.  Every other construction site in this
    # file calls calc_m() right after add().
    return [Clustering().add(s).calc_m() for s in random.sample(samples, K)]


def dunn(clu: List[Clustering], get_dis) -> float:
    '''
    Compute the Dunn index of a clustering result.

    Input: cluster list and a pairwise sample-distance function.
    Returns (minimum inter-cluster distance) / (maximum intra-cluster
    diameter); inf when every cluster has zero diameter.
    (Return annotation fixed: the ratio is a float, not an int.)
    '''
    min_dis, max_dis = float('inf'), float('-inf')
    for i, c1 in enumerate(clu):
        # Track the largest within-cluster distance (cluster "diameter").
        max_dis = max(max_dis, c1.largest_dis_in_clu(get_dis))
        for j, c2 in enumerate(clu):
            if i == j:
                continue
            # Track the smallest distance between two different clusters.
            min_dis = min(min_dis, c1.smallest_dis_between_clu(c2, get_dis))
    # Guard against division by zero when all clusters are degenerate.
    if max_dis == 0:
        return float('inf')
    return min_dis / max_dis


def davies_bouldin(clu: List[Clustering]) -> float:
    '''
    Compute the Davies-Bouldin index of a clustering result.

    Input: cluster list (at least two clusters, otherwise no "other"
    cluster exists and the per-cluster maximum stays -inf).
    Returns the average, over all clusters, of the maximum similarity
    to any other cluster.
    (Return annotation fixed: the average is a float, not an int.)
    '''
    total = 0  # renamed from `sum` to stop shadowing the builtin
    for i, c1 in enumerate(clu):
        # Worst-case (largest) similarity of c1 to any other cluster.
        max_r = float('-inf')
        for j, c2 in enumerate(clu):
            if i == j:
                continue
            max_r = max(max_r, c1.similarity(c2))
        total += max_r
    return total / len(clu)


def inner_sum_squares(clu: List[Clustering]) -> float:
    '''
    Compute the within-group sum of squares of a clustering result:
    the sum over clusters of inner_variance() squared times cluster size.
    (Return annotation fixed: generally a float, not an int.)
    '''
    total = 0  # renamed from `sum` to stop shadowing the builtin
    for c in clu:
        # NOTE(review): inner_variance() is squared here, which suggests it
        # actually returns a standard deviation — confirm in Clustering.
        total += (c.inner_variance() ** 2) * (len(c.samples))
    return total



# for debug
# samples = [[5,2],[1,2],[2,1],[6,2],[1,1],[3,1],[7,-1],[5,-1]]
# samples = [[4,1],[4,0],[5,0],[3,0],[2,0],[3,0],[0,0]]
# clu = [Clustering().add([3,4]).calc_m(),Clustering().add([0,0]).calc_m()]
# s = sequential(samples,3,5,euclid_dis)
# print(s)
# print(dunn(s,euclid_dis))
# print(davies_bouldin(s))
# print(s[0].inner_variance())
# print(s[1].inner_variance())
# print(euclid_dis(s[0].m,s[1].m))


# clu = [[5.0, 2.0], [6.0, 2.0], [7.0, -1.0], [5.0, -1.0]], [[1.0, 2.0], [2.0, 1.0], [1.0, 1.0], [3.0, 1.0]]
# c1 = Clustering().add([5,2]).add([6,2]).add([7,-1]).add([5,-1]).calc_m()
# c2 = Clustering().add([1,2]).add([2,1]).add([1,1]).add([3,1]).calc_m()
# c1 = Clustering([[5.0, 2.0], [6.0, 2.0], [7.0, -1.0], [5.0, -1.0]]).calc_m()
# c2 = Clustering([[1.0, 2.0], [2.0, 1.0], [1.0, 1.0], [3.0, 1.0]]).calc_m()
 
# print(c1.inner_variance())

# c2 = Clustering().add([1,2]).add([2,1]).add([1,1]).add([3,1]).calc_m()
# print(davies_bouldin([c1,c2]))
# print(dunn([c1,c2],euclid_dis))
# h = hierarchical(samples,3,5,euclid_dis)
# print(h)
# print(dunn(h,euclid_dis))
# print(davies_bouldin(h))


# print(k_means(s,random_clu(s,2),euclid_dis))
# random_clu(samples,5)
# c = Clustering()
# for s in samples:
#     c.add(s)
# print(c)
# print(c.largest_dis_in_clu(euclid_dis))


                
