import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.cluster import AgglomerativeClustering,KMeans
from sklearn.metrics import silhouette_score
from pyecharts import options as opts
from pyecharts.charts import Pie,Timeline


# 1、分层聚类
# 1. Hierarchical clustering
df_hierarchy = pd.read_csv("tmp/hierarchy_training_data.csv")
# Collapse each customer's order records into one averaged row per customer
df_dispose = df_hierarchy.groupby("customer").mean().round(2)
print(df_dispose)

# 1.1 Hierarchical clustering with scipy's linkage()
# Disabled: computing and drawing the full dendrogram takes very long.
# plt.rcParams['font.sans-serif'] = ['SimHei']   # render CJK labels correctly
# plt.rcParams['axes.unicode_minus'] = False     # render minus signs correctly
# # Build the linkage (distance) matrix
# dist_matrix = linkage(df_dispose, method='average', metric='euclidean')
# # Draw the dendrogram
# plt.figure(figsize=(20, 10))
# plt.title('层次聚类树状图')
# plt.xlabel('样本指数')
# plt.ylabel('距离')
# dendrogram(dist_matrix, leaf_rotation=90., leaf_font_size=8.)
# # Save the figure under tmp/
# plt.savefig("tmp/层次聚类树状图.png")
# plt.show()

# 2.2、AgglomerativeClustering实现分层聚类
# 计算轮廓系数
def get_silhouette_score(cluster_data, n_clusters, linkage="ward"):
    """Silhouette score of an agglomerative clustering of `cluster_data`.

    The original comment said the ward/complete/average merge strategies
    should be compared, but the strategy was hard-coded to sklearn's
    default ('ward'). `linkage` is now a parameter (default unchanged)
    so the comparison is actually possible.

    Args:
        cluster_data: feature matrix (DataFrame or 2-D array) to cluster.
        n_clusters: number of clusters to form.
        linkage: merge strategy — "ward" (previous behavior), "complete",
            "average" or "single".

    Returns:
        float: mean silhouette coefficient of the predicted labels.
    """
    clustering = AgglomerativeClustering(n_clusters=n_clusters, linkage=linkage)
    labels = clustering.fit_predict(cluster_data)
    return silhouette_score(cluster_data, labels)

# 寻找最优的n_clusters
def find_best_n_clusters(cluster_data, min_clusters=2, max_clusters=14):
    """Return the cluster count with the highest silhouette score.

    Scans every candidate in [min_clusters, max_clusters] (defaults 2..14,
    the previously hard-coded range) and keeps the best one.

    Args:
        cluster_data: feature matrix passed through to get_silhouette_score.
        min_clusters: smallest cluster count to try (must be >= 2).
        max_clusters: largest cluster count to try (inclusive).

    Returns:
        int-like: the cluster count whose silhouette score is maximal.
    """
    scores = []
    for n_clusters in range(min_clusters, max_clusters + 1):
        score = get_silhouette_score(cluster_data, n_clusters)
        scores.append(score)
        print("n_clusters为：",n_clusters,"时，他的轮廓系数为：",score)
    # argmax is an offset into the scan, which starts at min_clusters
    best_n_clusters = np.argmax(scores) + min_clusters
    return best_n_clusters

"""
层次聚类模型,主要是看选取的簇是关键
利用通过轮廓系数来选择最优的n_clusters
具体来说,轮廓系数是对于每个点i,它的簇内距离a(i)和该点到其最近邻簇的平均距离b(i)之差，除以这两者中的较大值。总的轮廓系数则是各点轮廓系数的均值。
要选择最优的n_clusters,需要在不同的n_clusters取值下,计算轮廓系数的平均值,并找到使平均轮廓系数最大的n_clusters数目。
"""
# 选择最优簇数
best_n_clusters = find_best_n_clusters(df_dispose)
score = get_silhouette_score(df_dispose, best_n_clusters)
print("最优best_n_clusters为：",best_n_clusters)
print("最优轮廓系数为：",score)


# 1.3 Analyse each cluster's spending tendencies with the hierarchical model
# Train on the raw per-order rows (no per-customer averaging) for more samples
df_hierarchy_srainging = pd.read_csv("tmp/hierarchy_training_data.csv")
# Build the model. The deprecated `affinity` keyword (removed in sklearn >= 1.4)
# is dropped: 'ward' linkage requires the euclidean metric, which is the default.
model = AgglomerativeClustering(n_clusters=best_n_clusters, linkage='ward')
# Fit and get one cluster label per row.
# NOTE(review): the raw frame (including every CSV column) is fed to the model,
# matching the original code — confirm whether an id column should be dropped.
predicted_label = model.fit_predict(df_hierarchy_srainging)
# Attach the labels to the frame
df_hierarchy_srainging['分类标签'] = predicted_label
# Show how many rows fall into each cluster
print(df_hierarchy_srainging.groupby(by='分类标签')['分类标签'].count())

# Percentage columns that make up a purchase profile
_profile_cols = ['discount%','Food%','Fresh%','Drinks%','Home%','Beauty%','Health%','Baby%','Pets%']

def _cluster_profile(df, label):
    # Mean purchase shares of one cluster, rounded to 2 decimals
    return df.loc[df['分类标签'] == label, _profile_cols].mean().round(2)

# Average purchase profile of each of the four clusters
label_0 = _cluster_profile(df_hierarchy_srainging, 0)
label_1 = _cluster_profile(df_hierarchy_srainging, 1)
label_2 = _cluster_profile(df_hierarchy_srainging, 2)
label_3 = _cluster_profile(df_hierarchy_srainging, 3)
# Display names for the pie slices
pie_label = ['折扣','食物(非生鲜)','生鲜类食物','饮料','家居用品','美妆类产品','保健类产品','母婴类产品','宠物用品']
# (name, value) pairs in the shape pyecharts expects
list_0 = [[name, value] for name, value in zip(pie_label, label_0.values.tolist())]
list_1 = [[name, value] for name, value in zip(pie_label, label_1.values.tolist())]
list_2 = [[name, value] for name, value in zip(pie_label, label_2.values.tolist())]
list_3 = [[name, value] for name, value in zip(pie_label, label_3.values.tolist())]
# 用pyecahrt画图
# 写一个函数来配置标签项
def new_label_opts():
    """Build the shared label config: outside-positioned slice labels showing
    the item name and its percentage on separate lines."""
    label_config = opts.LabelOpts(position="outside", formatter="{b}:\n{d}%")
    return label_config

# One pie per cluster, laid out on a 2x2 grid inside a single 900x900 canvas
_pie_specs = [
    ("标签0的人群", list_0, ["20%", "20%"]),
    ("标签1的人群", list_1, ["60%", "20%"]),
    ("标签2的人群", list_2, ["20%", "50%"]),
    ("标签3的人群", list_3, ["60%", "50%"]),
]
c = Pie(init_opts=opts.InitOpts(width='900px', height='900px'))
for series_name, series_data, series_center in _pie_specs:
    c.add(
        series_name,
        series_data,
        center=series_center,
        radius=[50, 80],
        label_opts=new_label_opts(),
    )
c.set_global_opts(
    title_opts=opts.TitleOpts(title="4类人群的平均购买情况："),
    legend_opts=opts.LegendOpts(
        type_="scroll", pos_top="20%", pos_left="80%", orient="vertical"
    ),
)
# Write the chart to tmp/ and also render for notebook display
c.render("tmp/4类人群的平均购买情况(分层聚类).html")
c.render_notebook()





# 2. K-Means clustering
df_kmeans = pd.read_csv("tmp/KMeans_training_data.csv")
# Standardise the features with sklearn's StandardScaler
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X = scaler.fit_transform(df_kmeans)

# 2.1 Choosing K
# 2.1.1 Elbow method: plot SSE (inertia) against every candidate K
k_values = range(1, 30)
sse_values = []
for k in k_values:
    model_k = KMeans(n_clusters=k, random_state=333).fit(X)  # fixed seed
    # inertia_ is the within-cluster sum of squared distances
    sse_values.append(model_k.inertia_)

# Visualise the SSE curve
plt.plot(k_values, sse_values, 'o-')
plt.savefig("tmp/KMeans手肘法确定K值.png")
plt.show()
# The elbow suggests roughly 9 or 10 clusters

# 2.1.2 通过轮廓系数法看看
def get_silhouette_score(cluster_data, n_clusters):
    """Silhouette score of a K-Means fit with `n_clusters` clusters.

    NOTE(review): this redefines the hierarchical-clustering helper of the
    same name above; from this point on only the K-Means version exists.
    """
    model = KMeans(n_clusters=n_clusters, random_state=333)
    predicted = model.fit_predict(cluster_data)
    return silhouette_score(cluster_data, predicted)

# 寻找最优的K值
def find_best_k_clusters(cluster_data):
    """Scan K in [2, 14] and return the K with the highest silhouette score."""
    scores = []
    for k_clusters in range(2, 15):
        # Evaluate this candidate K
        score = get_silhouette_score(cluster_data, k_clusters)
        print("n_clusters为：",k_clusters,"时，他的轮廓系数为：",score)
        scores.append(score)
    # argmax is an offset into the scan, which starts at K=2
    return np.argmax(scores) + 2
# Report the silhouette-optimal K and its score
best_k_clusters = find_best_k_clusters(X)
score = get_silhouette_score(X, best_k_clusters)
print("最优best_n_clusters为：",best_k_clusters)
print("最优轮廓系数为：",score)

# 2.2 Analyse each cluster's spending tendencies
# Final model uses K=9, chosen from the elbow plot above (the silhouette
# scan is kept for reference; 9/10 was judged the better trade-off).
clustering = KMeans(n_clusters = 9,random_state = 333)
labels = clustering.fit_predict(X)
df_kmeans['分类标签'] = labels


# Cluster sizes — the original computed this count and silently discarded
# the result; print it (matching the hierarchical section's style) so the
# distribution is actually visible.
print(df_kmeans.groupby(by='分类标签')['分类标签'].count())
# Timeline container: one page per cluster pie, auto-played further below
timeline = Timeline()

def new_label_opts():
    """Shared slice-label config: labels outside the ring, item name and
    percentage on separate lines.

    NOTE(review): redefines the identically-named helper from the
    hierarchical-clustering section with the same configuration.
    """
    return opts.LabelOpts(position="outside", formatter="{b}:\n{d}%")
# Compute each cluster's mean purchase profile and render it as one pie
# per timeline step. data_list keeps every cluster's pie data.
data_list = []

# Slice display names; the matching percentage columns are listed below.
pie_label = ['折扣','食物(非生鲜)','生鲜类食物','饮料','家居用品','美妆类产品','保健类产品','母婴类产品','宠物用品']
profile_cols = ['discount%','Food%','Fresh%','Drinks%','Home%','Beauty%','Health%','Baby%','Pets%']
# NOTE(review): iterates len(pie_label) == 9 times, which happens to equal
# the K=9 cluster count chosen above — confirm this coupling is intended.
for i in range(len(pie_label)):
    # Mean purchase shares of cluster i, rounded to 2 decimals
    cluster_rows = df_kmeans.loc[df_kmeans['分类标签'] == i, :]
    mean_profile = cluster_rows[profile_cols].mean().round(2)
    # (name, value) pairs in the shape pyecharts expects
    pie_data = [[name, share] for name, share in zip(pie_label, mean_profile.values.tolist())]
    data_list.append(pie_data)

    # Build this cluster's pie chart
    pie_chart = (
        Pie(init_opts=opts.InitOpts(width='400px', height='400px'))
        .add(
            "标签0的人群",
            data_list[i],
            radius=[60, 150],
            label_opts=new_label_opts(),
        )
        .set_global_opts(
            title_opts=opts.TitleOpts(title=f"标签{i}类人群的平均购买情况："),
            legend_opts=opts.LegendOpts(is_show=False),
        )
    )
    # Register the chart as timeline step i
    timeline.add(pie_chart, str(i))

# Auto-play configuration
timeline.add_schema(
    play_interval=5000,     # milliseconds between steps
    is_timeline_show=True,  # keep the timeline bar visible while playing
    is_auto_play=True,      # start playing automatically
    is_loop_play=False,     # stop after the last step
)

timeline.render("tmp/9类人的平均购买情况(K-means聚类).html")








# 3、关联规则模型
# 定义规则，返回用户购买了的商品类型
# Category names in the order of the corresponding entries in acquire_list
_GOODS_CATEGORIES = ("Food", "Fresh", "Drinks", "Home", "Beauty", "Health", "Baby", "Pets")

def acquire_goods(acquire_list):
    """Return the product categories a customer bought at least once.

    Replaces the original eight-branch if-chain with a table-driven
    comprehension; behavior is identical.

    Args:
        acquire_list: sequence of numbers, one count/amount per category,
            in the order Food, Fresh, Drinks, Home, Beauty, Health, Baby,
            Pets (at least 8 entries).

    Returns:
        list[str]: names of the categories whose value is > 0, in the
        fixed category order above.

    Raises:
        IndexError: if `acquire_list` has fewer than 8 entries (same as
            the original implementation).
    """
    # Explicit indexing (not zip) preserves the original IndexError on
    # too-short input.
    return [name for idx, name in enumerate(_GOODS_CATEGORIES) if acquire_list[idx] > 0]

# Path fixed: the original used a backslash ("tmp\KMeans_training_data.csv"),
# which only resolves on Windows; every other read in this file uses "/".
df_relevance = pd.read_csv("tmp/KMeans_training_data.csv")
# Drop the first column and keep the 8 category value columns per row
# (assumes column 0 is an id column, as the original did — TODO confirm)
goods_list = df_relevance.iloc[:, 1:].values.tolist()
# One basket (list of purchased category names) per customer row
acquire_goods_list = [acquire_goods(row) for row in goods_list]

# 3. Association-rule mining with FP-growth
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import fpgrowth
from mlxtend.frequent_patterns import association_rules

# One-hot encode the transaction lists into the boolean frame fpgrowth expects
te = TransactionEncoder()
encoded = te.fit(acquire_goods_list).transform(acquire_goods_list)
df = pd.DataFrame(encoded, columns=te.columns_)

# Mine frequent itemsets with support >= 0.5
frequent_itemsets = fpgrowth(df, min_support=0.5, use_colnames=True)

# Generate rules, keeping only those with confidence >= 0.7
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.7)

print(rules)