import mysql.connector
import numpy as np
from sklearn.cluster import KMeans
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from matplotlib import pyplot as plt

# Configure matplotlib so Chinese characters in titles/labels render correctly
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Read database credentials from a local file with the layout "host,user,password".
# strip() guards against a trailing newline in the file leaking into the
# password field, which would make authentication fail.
with open("database_info", "r") as db:
    db_host, db_user, db_password = db.read().strip().split(",")[:3]

# Open the MySQL connection used to load the clustering data
my_db = mysql.connector.connect(
    host=db_host,
    user=db_user,
    passwd=db_password,
    database="artificial_intelligence",
    auth_plugin='mysql_native_password'
)

# Fetch the data from the database
df = pd.read_sql('select * from income', con=my_db)
# read_sql fully materializes the DataFrame, so release the connection now
# instead of leaking it for the rest of the process.
my_db.close()

# Min-max normalize both features to [0, 1]. MinMaxScaler scales each
# column independently, so a single fit_transform over both columns is
# equivalent to fitting each column separately.
scaler = MinMaxScaler()
df[['Income', 'Age']] = scaler.fit_transform(df[['Income', 'Age']])

# Elbow method: plot SSE (inertia) against k to estimate the best k
sse = []
k_rng = range(1, 10)
for k in k_rng:
    km = KMeans(n_clusters=k)
    km.fit(df[['Age', 'Income']])
    sse.append(km.inertia_)
plt.xlabel('k')
plt.ylabel('Sum of squared error')
plt.plot(k_rng, sse)
plt.title('使用手肘法估计最佳的k')
plt.show()


# Euclidean distance between two vectors
def distance_euclidean(vec1, vec2):
    """Return the Euclidean (L2) distance between *vec1* and *vec2*."""
    return np.linalg.norm(vec1 - vec2)


# Cosine distance between two vectors
def distance_cosine(vec1, vec2):
    """Return 1 - cosine similarity: 0 for parallel vectors, 2 for opposite."""
    similarity = np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
    return 1 - similarity


def kMeans(data, k, d):
    """Cluster *data* into *k* groups with Lloyd's algorithm and plot the result.

    Parameters:
        data: 2-D array of samples (rows) x features (columns); values are
              assumed min-max normalized to [0, 1] (centroids are drawn there).
        k:    number of clusters.
        d:    'distance_euclidean' for Euclidean distance; any other value
              selects cosine distance (matches the original string dispatch).
    """
    # Initialize centroids uniformly in [0, 1) to match the normalized data
    K = np.random.uniform(0, 1, (k, data.shape[1]))
    print("初始化质心：")
    print(K)
    # ret[i] = (distance to nearest centroid, index of nearest centroid)
    ret = np.zeros((data.shape[0], 2))
    # Resolve the metric once instead of re-comparing the string per point
    dist_fn = distance_euclidean if d == 'distance_euclidean' else distance_cosine
    flag = True
    while flag:
        flag = False
        # Assignment step: give each point to its nearest centroid
        for i in range(data.shape[0]):
            minDist = np.inf
            minIndex = -1
            for j in range(K.shape[0]):
                ds = dist_fn(data[i], K[j])
                if ds < minDist:
                    minDist = ds
                    minIndex = j
            ret[i][0] = minDist
            ret[i][1] = minIndex
        # Update step: move each centroid to the mean of its members.
        # Empty clusters keep their previous centroid (original behavior).
        for i in range(k):
            cluster = data[ret[:, 1] == i]
            if len(cluster) > 0:
                center = np.mean(cluster, axis=0)
                if not (center == K[i]).all():
                    flag = True  # a centroid moved, so iterate again
                    K[i] = center
    # Converged: centroids no longer change
    for i in range(len(K)):
        print("质心点为：{}".format(K[i]))
    data_c = np.c_[data, ret]
    data_c = pd.DataFrame(data_c)
    print("data_c:")
    print(data_c)

    # Visualize the result. The cluster-label column sits after the feature
    # columns and the distance column; loop over all k clusters instead of
    # hard-coding exactly three.
    label_col = data.shape[1] + 1
    for cluster_idx in range(k):
        members = data_c[data_c[label_col] == cluster_idx]
        plt.scatter(members[0], members[1], alpha=0.5)
    plt.scatter(K[:, 0], K[:, 1], marker="X")
    plt.xlabel('年龄')
    plt.ylabel('收入')
    plt.title('使用KMeans聚类后数据({})'.format(d))
    plt.show()


if __name__ == '__main__':
    # Visualize the raw (normalized) data before clustering
    points = np.asarray(df)[:, 1:]  # drop the leading id column
    plt.scatter(points[:, 0], points[:, 1])
    plt.xlabel('年龄')
    plt.ylabel('收入')
    plt.title('原始数据')
    plt.show()

    # Run the custom KMeans with each distance metric and plot the clusters
    for metric in ('distance_euclidean', 'distance_cosine'):
        kMeans(points, 3, metric)