import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")  # silence all warnings (incl. pandas SettingWithCopyWarning triggered below)
plt.rcParams['font.family']='SimHei'  # SimHei font so the Chinese plot labels render
# plt.rcParams['figure.dpi'] = 120  # default figure DPI is 100
# ================================================================================================
# (duplicate warnings.filterwarnings("ignore") removed -- already applied at module top)
# threshold: print full numpy arrays instead of eliding with "..." (np.inf = no limit)
np.set_printoptions(threshold=np.inf)
# show all columns when printing DataFrames
pd.set_option('display.max_columns', None)
# show all rows when printing DataFrames
pd.set_option('display.max_rows', None)
# ================================================================================================


def initCentroids(dataSet,k):
    """Draw k distinct rows of dataSet at random as the initial centroids.

    Returns a list of k coordinate lists (one per sampled row).
    NOTE(review): index labels are sampled but then used positionally via
    iloc, so this assumes a 0..n-1 RangeIndex -- confirm upstream reset_index.
    """
    # random.seed(1)  # uncomment for reproducible runs
    sampled_labels = random.sample(list(dataSet.index), k)
    picked = dataSet.iloc[sampled_labels]
    return [list(picked.loc[lbl, :]) for lbl in picked.index]

# Euclidean distance between vec1 and vec2 (at least one must be a numpy array)
def calcuDistance(vec1,vec2):
    """Return the Euclidean (L2) distance between two vectors."""
    diff = vec1 - vec2
    return np.sqrt(np.sum(diff ** 2))

# （三）Assign every sample to its nearest centroid
def minDistance(dataSet,centroidList):
    """Assign each sample in dataSet to the nearest centroid (Euclidean).

    Fixes vs. the original: removes the dead ``flag`` counter and replaces
    the dict-sort with a direct arg-min (same tie-breaking: the lowest
    centroid index wins, exactly like the original stable sort on distance).

    Args:
        dataSet: DataFrame of samples. NOTE(review): index values are used
            positionally via iloc, so a 0..n-1 RangeIndex is assumed --
            confirm upstream reset_index.
        centroidList: list of centroid coordinate lists.

    Returns:
        dict mapping tuple(sample values) -> index of its nearest centroid.
    """
    cluster_dict = {}
    for pos in list(dataSet.index):
        row = dataSet.iloc[pos, :]
        sample = np.array(row)
        # distance from this sample to every centroid (same formula as
        # calcuDistance, inlined so the function is self-contained)
        dists = [np.sqrt(np.sum(np.square(sample - np.array(c)))) for c in centroidList]
        # np.argmin returns the FIRST minimum -> lowest index on ties
        cluster_dict[tuple(row)] = int(np.argmin(dists))
    return cluster_dict

# （四）Recompute centroids
def new_Centroids(dict_cluster):
    """Recompute one centroid per cluster as the member-wise mean.

    Fix: iterates cluster labels in sorted order instead of raw ``set``
    iteration order (which is implementation-dependent), so the position
    of each centroid in the returned list is deterministic.

    Args:
        dict_cluster: dict {tuple(sample): cluster label}.

    Returns:
        (new_Cen_list, dict_clus_cen): the centroids as lists of floats,
        and a dict {tuple(centroid): position in new_Cen_list}.
    """
    new_Cen_list = []
    dict_clus_cen = {}
    for pos, label in enumerate(sorted(set(dict_cluster.values()))):
        # stack every member sample of this cluster and average column-wise
        members = np.array([list(s) for s, c in dict_cluster.items() if c == label])
        centroid = list(np.mean(members, 0))
        new_Cen_list.append(centroid)
        dict_clus_cen[tuple(centroid)] = pos
    return new_Cen_list, dict_clus_cen


# (五) Cluster loss: per-cluster sum of sample-to-centroid distances
def clusters_loss(Centroids_list,dict_clus):
    """Compute the loss of a clustering.

    NOTE: despite the original "mean square error" wording, this sums the
    plain Euclidean distances (neither squared nor averaged).

    Fix vs. the original: drops the unused local ``l``.

    Args:
        Centroids_list: list of centroid coordinate lists; position i is
            the centroid of cluster label i.
        dict_clus: dict {tuple(sample): cluster label}.

    Returns:
        (total, per_cluster): overall sum and the list of per-cluster sums,
        ordered like Centroids_list.
    """
    loss_clusters = []
    for i, centroid in enumerate(Centroids_list):
        cen = np.array(centroid)
        # same formula as calcuDistance, inlined for self-containment
        dists = [np.sqrt(np.sum(np.square(cen - np.array(sample))))
                 for sample, label in dict_clus.items() if label == i]
        loss_clusters.append(np.sum(dists))
    return np.sum(loss_clusters),loss_clusters

# (六) Core k-means clustering (k=2)
def model_kmeans(list_a):
    """Run plain 2-means on list_a (a DataFrame of coordinate columns).

    Iterates assign -> recompute-centroids until the period-over-period
    relative decrease of the total loss is no longer positive.

    Fixes vs. the original:
      * removes the redundant minDistance call before the loop (its result
        was immediately recomputed inside the loop);
      * guards the relative-change division so a zero previous loss ends
        the loop instead of raising ZeroDivisionError.

    Args:
        list_a: DataFrame with the feature columns (here '经度','纬度').

    Returns:
        dict {tuple(sample): cluster index} of the final assignment.
    """
    data_k = initCentroids(list_a, 2)  # random initial centroids
    los = []                           # total loss per iteration
    clus = {}
    stop = 1.0
    while stop > 0:
        clus = minDistance(list_a, data_k)                 # assign samples
        new_cent, dict_clus_cen = new_Centroids(clus)      # recompute centroids
        loss1, loss1_list = clusters_loss(new_cent, clus)  # total / per-cluster loss
        los.append(loss1)
        data_k = new_cent
        if len(los) >= 2:
            prev = los[-2]
            # relative improvement of the loss; stop once non-positive
            stop = (prev - los[-1]) / prev if prev != 0 else 0
        else:
            stop = 1
    return clus

# (七) Visualize the clustering result
def plt_cluster(data_list):
    """Scatter-plot each sample coloured by its cluster and annotate each
    point with its 'label' value.

    Args:
        data_list: DataFrame with '经度' (lon), '纬度' (lat), 'cluster'
            (int, indexes into clusMark) and 'label' columns.

    NOTE(review): plt.xticks(label='jd') / plt.yticks(label='wd') look like
    they were meant to be plt.xlabel/plt.ylabel -- as written they have no
    visible effect; confirm intent. plt.legend() is called without any
    labelled artists, so it emits a (suppressed) warning.
    """
    # one colour per cluster id; cluster values must stay below len(clusMark)
    clusMark = ['r', 'b', 'g', 'k', 'y', 'plum','salmon','indigo','skyblue','firebrick','peru','teal','gold']
    clus_list = list(set(data_list['cluster']))
    plt.figure()
    plt.xticks(label='jd')
    plt.yticks(label='wd')
    print(list(data_list.index))
    # plot every point with its cluster's colour
    for i in list(data_list.index):
        list_X=list(data_list.loc[i,['经度','纬度','cluster']]) #,'label'
        for j in clus_list:
            if list_X[-1]==j:
                plt.plot(list_X[0],list_X[1],c=clusMark[j],marker='o',markersize=5)
    # annotate every point with its label
    for i in list(data_list.index):
        list_X = list(data_list.loc[i, ['经度', '纬度', 'label']])  # ,'label'
        plt.annotate('{}'.format(str(list_X[2])), xy=(list_X[0],list_X[1]), xytext=(list_X[0],list_X[1])) #,arrowprops=dict(arrowstyle="->", connectionstyle="arc3")
    plt.title('东湖项目片区划分--二分K均值聚类')
    plt.legend()
    plt.show()

# (八) Community geographic distance matrix
def build_dist_mat(da_jwd,da_dtjl):
    """Build the community-to-community distance matrix from the riding table.

    For each ordered pair (i, j): look up i->j in da_dtjl, fall back to
    j->i, and default to 0 when no record exists. A small random jitter is
    added to every found distance -- presumably to break exact-distance
    ties downstream; confirm before removing.

    Fix vs. the original: the two nested bare ``except:`` clauses (which
    swallowed any error, not just a missing record) are replaced by
    explicit empty-match checks.

    Keeps the original module-global ``dist_mat`` side effect because
    prio_clus_small / prio_clus_big read it.

    Args:
        da_jwd: DataFrame with a 'communityid' column.
        da_dtjl: DataFrame with 'origins', 'destinations', 'distance'.

    Returns:
        Square DataFrame indexed and columned by communityid.
    """
    global dist_mat
    ids = list(da_jwd['communityid'])
    n = len(ids)
    dist_mat = pd.DataFrame(np.zeros([n, n]), index=ids, columns=ids)
    for i in ids:
        for j in ids:
            match = da_dtjl[(da_dtjl['origins'] == i) & (da_dtjl['destinations'] == j)]['distance']
            if match.empty:
                # no i->j record: try the reverse direction
                match = da_dtjl[(da_dtjl['origins'] == j) & (da_dtjl['destinations'] == i)]['distance']
            if match.empty:
                dis_1 = 0
            else:
                dis_1 = list(match)[0] + random.random()
            dist_mat.loc[i, j] = dis_1
    return dist_mat

# （九）Refresh the clustering result
def min_clus_train(da,clus):
    """Re-derive the 'cluster' column of da from the clus mapping and
    summarize per-cluster weight.

    Args:
        da: DataFrame with '经度', '纬度' and 'tdzl' columns (mutated in place).
        clus: dict {(lon, lat): cluster label}.

    Returns:
        (da, summary, lightest): da with its refreshed 'cluster' column,
        the per-cluster 'tdzl' sums sorted ascending, and the smallest sum.
    """
    da['cluster'] = da[['经度', '纬度']].apply(lambda row: clus[tuple(row)], axis=1)
    summary = (
        da.groupby(['cluster'])['tdzl']
          .agg(['sum'])
          .reset_index()
          .sort_values(by=['sum'])
    )
    lightest = np.min(summary['sum'])
    return da, summary, lightest


#  （十）Smallest-cluster optimization -- add points
def prio_clus_small(da_jwd,min_clus,yz_min,clus,dis_up):
    """Grow the lightest cluster by repeatedly absorbing the outside point
    nearest to it (by the dist_mat lookup).

    Stops when the cluster weight exceeds the lower bound ``w_down``, when
    the weight gain of a transfer turns negative, or when the closest
    candidate is farther than ``dis_up``.

    Args:
        da_jwd: samples with '经度'/'纬度'/'communityid'/'cluster'/'tdzl'.
        min_clus: label of the smallest cluster.
        yz_min: current weight ('tdzl' sum) of that cluster.
        clus: dict {(lon, lat): cluster label} -- mutated in place.
        dis_up: distance cap for a single transfer.

    Returns:
        The (mutated) clus mapping.

    NOTE(review): reads the module globals ``w_down`` and ``dist_mat``
    (the latter is populated by build_dist_mat) -- confirm both are set
    before calling.
    """
    print('是否需要优化：',yz_min <= w_down)
    step=0
    da_jwd_min = da_jwd[da_jwd['cluster'] == min_clus]
    da_jwd_max = da_jwd[da_jwd['cluster'] != min_clus]
    while yz_min <= w_down and step>=0  :
        xx=yz_min+random.random()  # jittered weight before the transfer (for the gain test below)
        print('正在加点优化',yz_min,w_down)
        da_jwd_max['dis_min']=0  # write on a slice -- SettingWithCopyWarning is globally suppressed
        # NOTE(review): this is the column *sum*, not the mean -- presumably a scaled centroid; verify
        cen_lis = list(da_jwd_min[['经度', '纬度']].apply(sum, axis=0))

        # find the outside point closest to the small cluster
        # [inverse view: the small-cluster point closest to the big cluster]
        da_jwd_max['dis_min'] = da_jwd_max[['经度', '纬度']].apply(lambda x: calcuDistance(np.array(list(x)), np.array(cen_lis)), axis=1)
        da_jwd_max = da_jwd_max.sort_values(by=['dis_min'])
        sam_min=list(da_jwd_min['communityid']);l_min=len(sam_min)
        sam_max=list(da_jwd_max['communityid']);l_max=len(sam_max)
        # pairwise distance table between small-cluster and outside points
        dis_dian=pd.DataFrame(np.zeros((da_jwd_min.shape[0],da_jwd_max.shape[0])),index=range(l_min),columns=range(l_max))
        for i in range(l_min):
            for j in range(l_max):
                dis_dian.loc[i, j] = dist_mat.loc[sam_min[i],sam_max[j]]
        min_v = np.min(np.min(dis_dian, axis=0))
        index_y = dis_dian.apply(lambda x: x == min_v, axis=0).apply(sum, axis=0)  # per-column count of minimum hits
        min_y = sam_max[index_y[index_y==1].index[0]] # nearest outside point
        if min_v <= dis_up:
            # move the nearest point into the small cluster
            clus[tuple(da_jwd_max[da_jwd_max['communityid']==min_y][['经度', '纬度']].values[0])] = min_clus
            da_jwd, a, yz_min = min_clus_train(da_jwd, clus)  # refresh labels and weights
            print('加点优化通过！')
            yy = yz_min+random.random()
            step = yy - xx  # weight gain of the transfer; a negative step ends the loop
        else:
            print('加点优化未通过！')
            break
        da_jwd_min = da_jwd[da_jwd['cluster'] == min_clus]
        da_jwd_max = da_jwd[da_jwd['cluster'] != min_clus]
    return clus
#  （十一）Largest-cluster optimization -- remove points
def prio_clus_big(da_jwd,max_clus,yz_min,clus,dis_up):
    """Rebalance when the minimum cluster weight exceeds the upper bound
    ``w_up``, by transferring the outside point nearest to the max_clus
    cluster into it.

    Args:
        da_jwd: samples with '经度'/'纬度'/'communityid'/'cluster'/'tdzl'.
        max_clus: label of the largest cluster.
        yz_min: current minimum per-cluster weight.
        clus: dict {(lon, lat): cluster label} -- mutated in place.
        dis_up: distance cap for a single transfer.

    Returns:
        The (mutated) clus mapping.

    NOTE(review): despite the "remove points" heading, the transfer below
    assigns the nearest outside point TO max_clus (growing it) -- verify
    this matches the intended rebalancing direction. Also reads the module
    globals ``w_up`` and ``dist_mat``. Variable naming is inverted here:
    da_jwd_min holds the BIG cluster.
    """
    da_jwd_max=pd.DataFrame(np.zeros((3,4)))  # dummy non-empty frame so the first loop test passes
    print('是否需要优化：', yz_min > w_up)
    while da_jwd_max.empty is False and  yz_min > w_up :
        print('正在减点优化',yz_min,w_up)
        da_jwd_min = da_jwd[da_jwd['cluster'] == max_clus]  # the big cluster (inverted naming)
        da_jwd_max = da_jwd[da_jwd['cluster'] != max_clus]  # everything else
        # NOTE(review): column *sum*, not mean -- presumably a scaled centroid; verify
        cen_lis = list(da_jwd_min[['经度', '纬度']].apply(sum, axis=0))

        # find the outside point closest to the big cluster
        da_jwd_max['dis_min'] = da_jwd_max[['经度', '纬度']].apply(
            lambda x: calcuDistance(np.array(list(x)), np.array(cen_lis)), axis=1)
        da_jwd_max = da_jwd_max.sort_values(by=['dis_min'])

        # find the closest pair of points between the two groups
        sam_min = list(da_jwd_min['communityid']);l_min = len(sam_min)
        sam_max = list(da_jwd_max['communityid']);l_max = len(sam_max)
        dis_dian = pd.DataFrame(np.zeros((da_jwd_min.shape[0], da_jwd_max.shape[0])), index=range(l_min),
                                columns=range(l_max))
        for i in range(l_min):
            for j in range(l_max):
                dis_dian.loc[i, j] = dist_mat.loc[sam_min[i], sam_max[j]]
        min_v = np.min(np.min(dis_dian, axis=0))
        index_y = dis_dian.apply(lambda x: x == min_v, axis=0).apply(sum, axis=0)  # per-column count of minimum hits
        min_y = sam_max[index_y[index_y == 1].index[0]] # nearest outside point
        if min_v <= dis_up:
            print('减点优化通过！')
            clus[tuple(da_jwd_max[da_jwd_max['communityid'] == min_y][['经度', '纬度']].values[0])] = max_clus
            da_jwd, a, yz_min = min_clus_train(da_jwd, clus)  # refresh labels and weights
        else:
            print('减点优化未通过！')
            break
    return clus

# （十二）Run one round of bisecting (2-way) k-means
def model_2cla_k(da_jwd,w_down,w_up,dis_up,n_iter):
    """Recursively split the sample set in two until every piece fits the
    weight band.

    Each split runs model_kmeans on ('经度','纬度'); the lighter half is
    frozen as a final cluster (labelled ``count``) and the heavier half is
    split again. After each split the result may be patched up by
    prio_clus_small / prio_clus_big.

    Args:
        da_jwd: sample DataFrame ('经度','纬度','communityid','tdzl',...).
        w_down, w_up: per-cluster weight band. NOTE(review): these shadow
            the module globals of the same names that the prio_* helpers
            read -- keep them equal or behavior diverges.
        dis_up: distance cap for point transfers.
        n_iter: iteration cap for the initial split.

    Returns:
        DataFrame of all samples with a final 'cluster' column.
    """
    yz_min = 0
    count = 1 # running label assigned to each finalized cluster
    dt_juli() # refresh the module-global da_dtjl distance table
    co = 0
    # initial bisection: the original comment says "iterate until the light
    # half exceeds the weight lower bound (cap 100 iterations)".
    # NOTE(review): this is an `if`, so it runs at most once -- a `while`
    # retry loop may have been intended; confirm.
    print('-----------正在初始化分裂ing-----------')
    if (yz_min <= w_down or yz_min >= w_up) and co <= n_iter:
        clus = model_kmeans(da_jwd[['经度', '纬度']])
        da_jwd, a, yz_min=min_clus_train(da_jwd,clus)
        co += 1
    min_clus=list(a[a['sum'] == yz_min]['cluster'])[0] # label of the lighter of the 2 clusters
    max_clus = list(a[a['sum'] != yz_min]['cluster'])[0]
    da_jwd['label'] = list(range(1, da_jwd.shape[0] + 1))
    print('-----------正在初始化优化ing-----------')
    if yz_min<w_down:
        clus = prio_clus_small(da_jwd,min_clus, yz_min, clus,dis_up)
    elif yz_min>w_up:
        clus = prio_clus_big(da_jwd,max_clus, yz_min, clus,dis_up)
    # prepare the next bisection
    da_jwd, a, yz_min = min_clus_train(da_jwd, clus)
    da_jwd['label'] = list(range(1, da_jwd.shape[0] + 1))
    da_jwd_next = da_jwd[da_jwd['cluster'] !=min_clus ].reset_index(
        drop=True)
    da_aim = da_jwd[da_jwd['cluster'] ==min_clus]
    da_aim['cluster'] = count  # write on a slice -- SettingWithCopyWarning globally suppressed
    da_aim_lis = [da_aim]
    count+=1

    print('-----------正在下一次分裂ing-----------')
    yz_max=w_up+100  # sentinel so the first loop test passes
    while da_jwd_next.empty is False and da_jwd_next.shape[0]>1 and ( yz_min<=w_down or  yz_max>w_up):
        clus = model_kmeans(da_jwd_next[['经度', '纬度']])
        da_jwd_next, a, yz_min = min_clus_train(da_jwd_next, clus)
        da_jwd_beiyon = da_jwd_next  # backup reference, used when this turns out to be the last split
        da_jwd_next['cluster'] = da_jwd_next[['经度', '纬度']].apply(lambda x: clus[tuple(x)], axis=1)
        da_jwd_next['label'] = list(range(1, da_jwd_next.shape[0] + 1))
        min_clus = list(a[a['sum'] == yz_min]['cluster'])[0]
        max_clus = list(a[a['sum'] != yz_min]['cluster'])[0]
        yz_max=np.max(a['sum'])

        '''对于小簇增加点【考虑大簇本身大小是否有增加或者减少的能力】或者减少点'''
        # add points to the small cluster, or remove points, based on the weight band
        if yz_min < w_down:
            clus = prio_clus_small(da_jwd_next, min_clus, yz_min, clus,dis_up)
        elif yz_min > w_up:
            clus = prio_clus_big(da_jwd_next, max_clus, yz_min, clus,dis_up)
        da_jwd_next['cluster'] = da_jwd_next[['经度', '纬度']].apply(lambda x: clus[tuple(x)], axis=1)
        # prepare the next bisection
        da_jwd_next['label'] = list(range(1, da_jwd_next.shape[0] + 1))
        da_aim = da_jwd_next[da_jwd_next['cluster'] == list(a[a['sum'] == yz_min]['cluster'])[0]]
        da_jwd_next = da_jwd_next[da_jwd_next['cluster'] != list(a[a['sum'] == yz_min]['cluster'])[0]].reset_index(drop=True)

        if da_jwd_next.empty is False and yz_max>w_up:
            da_aim['cluster'] = count  # freeze the light half under the current label
            da_aim_lis.append(da_aim)
        else: # the remainder cannot be split any further
            print('-----------分裂结束ending-----------')
            da_jwd_beiyon['cluster'] = count  #
            da_aim_lis.append(da_jwd_beiyon)
        count += 1
    # merge all frozen pieces into one DataFrame
    da_aim_lis = pd.concat(da_aim_lis, axis=0, ignore_index=True)
    return da_aim_lis

# def read_path(path):
#     da_jwd = pd.read_excel(r'{}'.format(path))
#     da_jwd.drop_duplicates(subset=['cjmc'], inplace=True)
#     da_jwd = da_jwd.reset_index()
#     da_jwd['经度'] = da_jwd['经度'].map(lambda x: round(x, 5))
#     da_jwd['纬度'] = da_jwd['纬度'].map(lambda x: round(x, 5))
#     return da_jwd

# （十三）Repeated training, keep the best run [heuristic]
def area_division(da_jwd,w_down,w_up,dis_up,n_iter,co=0):
    """Run the bisecting-k-means division repeatedly and keep the result
    whose per-cluster weights deviate least from the [w_down, w_up] band.

    Args:
        da_jwd: sample DataFrame ('经度','纬度','communityid','tdzl',...).
        w_down, w_up: per-cluster weight band.
        dis_up: distance cap for point transfers.
        n_iter: number of successful runs to collect.
        co: starting iteration counter (default 0).

    Returns:
        The best clustering DataFrame found.

    NOTE(review): ``co += 1`` sits inside the try, so a failed attempt is
    silently retried -- if every attempt raises, this loop never ends.
    Also, if da_lis stays empty, the final indexing raises IndexError.
    """
    # community riding distance/time table (feeds the dis_up constraint)
    da_dtjl = dt_juli()
    # community distance matrix; build_dist_mat ALSO sets the module-global
    # dist_mat that prio_clus_small / prio_clus_big read -- the local name
    # here is unused downstream
    dist_mat = build_dist_mat(da_jwd, da_dtjl)

    da_lis = []
    while co <= n_iter:
        print('第{}次迭代！'.format(co))
        try:
            da_aim_lis = model_2cla_k(da_jwd, w_down, w_up,dis_up, n_iter)
            a = da_aim_lis.groupby(['cluster'])['tdzl'].agg(['sum']).reset_index(drop=True).sort_values(by=['sum'])
            # keep only partitions that conserve the total weight
            if np.sum(a['sum'])==np.sum(da_jwd['tdzl']):
                da_lis.append([da_aim_lis,np.sum(np.abs(a['sum']-w_down))+np.sum(np.abs(a['sum']-w_up))]) #,da_aim_lis.shape[0]
            co+= 1
        except:  # NOTE(review): bare except hides real errors; consider narrowing
            pass
    da_lis = list(map(tuple, da_lis))
    # ascending sort: smallest total deviation from the weight band wins
    da_aim_lis = sorted(da_lis, key=lambda item: item[1], reverse=False)[0][0]
    return da_aim_lis

# （十四）Community riding distance / time table
def dt_juli(path=r'E:\联运科技\历史代码\PAST\lianyun\片区划分聚类\东湖小区站点骑行距离和时间表.xlsx'):
    """Load and normalize the community-to-community riding table.

    Generalized: the workbook location is now a parameter whose default is
    the original hard-coded path, so existing zero-argument callers are
    unaffected. Keeps the original module-global ``da_dtjl`` side effect
    because other code reads it.

    Args:
        path: Excel file with 'origins', 'destinations', 'distance'
            ('…米' metres or '…公里' kilometres strings) and 'duration'
            ('…分钟' minutes strings) columns.

    Returns:
        DataFrame with 'distance' as float kilometres and 'duration' as
        int minutes.
    """
    global da_dtjl
    da_dtjl = pd.read_excel(path)
    da_dtjl = da_dtjl[['origins', 'destinations', 'distance', 'duration']]
    # '…米' (metres) -> km; otherwise strip '公里' (km) and parse
    da_dtjl['distance'] = da_dtjl['distance'].map(
        lambda x: float(x.replace('米', '')) / 1000 if x.find('米') >= 0 else float(x.replace('公里', '')))
    # '…分钟' (minutes) -> int
    da_dtjl['duration'] = da_dtjl['duration'].map(lambda x: int(x.replace('分钟', '')))
    return da_dtjl

if __name__ == '__main__':
    # (1) Initialization
    # community longitude/latitude table
    da_jwd = pd.read_excel(r'E:\联运科技\历史代码\PAST\lianyun\片区划分聚类\东湖街道各小区经纬度.xlsx')
    da_jwd.drop_duplicates(subset=['cjmc'], inplace=True)  # de-duplicate by community name
    da_jwd = da_jwd.reset_index()
    # round coordinates so they can serve as exact-match dict keys downstream
    da_jwd['经度'] = da_jwd['经度'].map(lambda x: round(x, 5))
    da_jwd['纬度'] = da_jwd['纬度'].map(lambda x: round(x, 5))


    # (2) Parameters
    w_up = 1200   # weight upper bound (kg) [past month: Donghu historical max collection ~1200 kg]
    w_down = 700  # weight lower bound (kg) [past month: per-collector average ~600 kg]
    n_iter = 50  # maximum number of training iterations
    dis_up =6  # distance cap (e.g. a point transfer beyond 6 km is rejected)

    # (3) Training
    da_aim_lis=area_division(da_jwd,w_down,w_up,dis_up,n_iter)  # keep the best of n_iter runs

    # (4) Save result
    da_aim_lis['label'] = list(range(1, da_aim_lis.shape[0] + 1))  # label for visualization / comparison
    # NOTE(review): after this line da_aim_lis is a JSON string, not a
    # DataFrame -- the commented-out Excel export below would have to run first
    da_aim_lis = da_aim_lis.to_json(orient="index")
    
    # a = da_aim_lis.groupby(['cluster'])['tdzl'].agg(['sum','count']).reset_index().sort_values(by=['cluster'])

    # writer = pd.ExcelWriter(r'E:\联运科技\片区划分\clus_result_V2.xlsx')
    # da_aim_lis.to_excel(writer,sheet_name='样本聚类',index=False)
    # a.to_excel(writer,sheet_name='簇内汇总',index=False)
    # writer.save()

    # (5) Visualization
    # plt_cluster(da_aim_lis)

