import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from  getFuncton import get_train
from sklearn.metrics.pairwise import cosine_similarity
import walf
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os

# def get_train(data):
#     userId = data['userId'].unique()
#     movieId=data['movieId'].unique()
#     train=pd.DataFrame()
#
#     for i in userId:
#         row=data[data['userId']==i].head(1)
#         train = pd.concat([train, row])
#
#
#
#     for i in movieId:
#         row = data[data['movieId'] == i].head(1)
#         train = pd.concat([train, row])
#
#     train = pd.concat([train, data.sample(int(data.shape[0] * 0.7))])
#
#     train=train.drop_duplicates()
#     test = data.drop(train.index)
#     return (train,test)

# Per-k error histories: errN accumulates one weighted-mean error per
# trial for the run clustered with n_clusters = N.
err4, err5, err6, err7, err8, err9, err10 = ([] for _ in range(7))
def k(data, test, n_clusters):
    """Cluster users by rating similarity and return the weighted mean WALF error.

    Users are factorized via walf.walf_getUser, clustered with KMeans on
    their pairwise cosine-similarity matrix, and a separate WALF model is
    trained per cluster; per-cluster errors are averaged weighted by the
    number of ratings each cluster covers.

    Parameters
    ----------
    data : pd.DataFrame
        Training ratings; must contain a 'userId' column.
    test : pd.DataFrame
        Held-out ratings, passed through to walf.walf_getUser.
    n_clusters : int
        Number of KMeans clusters to form.

    Returns
    -------
    float
        Size-weighted mean error over all clusters.
    """
    user_m, rating, error = walf.walf_getUser(data, test, 0.2)
    user_idx = user_m.index.unique()
    cosine_similarities = cosine_similarity(user_m)
    # BUG FIX: the original re-assigned n_clusters = 4 right here, so the
    # caller's argument was silently ignored and every experiment (k=4..10)
    # actually ran with 4 clusters. The parameter is now honored.
    kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(cosine_similarities)
    cluster_labels = kmeans.labels_
    # Group each user's rating row by its assigned cluster label.
    clusters = {}
    for i, label in enumerate(cluster_labels):
        clusters.setdefault(label, []).append(rating.iloc[i])
    error_mean = 0.0
    for cluster_label, cluster_data in clusters.items():
        cluster_df = pd.DataFrame(cluster_data)
        # Select every rating belonging to this cluster's users in one pass
        # (replaces the original O(n^2) per-user pd.concat loop).
        part_m = data.loc[data['userId'].isin(cluster_df.index)]
        train_part, test_part = get_train(part_m)
        err_part = walf.walf(train_part, test_part)
        # Weight each cluster's error by the number of ratings it covers.
        error_mean += err_part * part_m.shape[0]
    error_mean = error_mean / data.shape[0]
    print("mean:", error_mean)
    return error_mean

# Run 50 independent trials: each trial re-splits the ratings into a fresh
# train/test partition (get_train samples randomly) and evaluates the
# clustered-WALF error for every k in 4..10.
# The CSV is loaded once up front — the original re-read it from disk on
# every iteration, and its `i = 0` uselessly clobbered the loop variable.
ratings = pd.read_csv('ratings.csv', sep="\s+")
for trial in range(50):
    train, test = get_train(ratings)
    err4.append(k(train, test, 4))
    err5.append(k(train, test, 5))
    err6.append(k(train, test, 6))
    err7.append(k(train, test, 7))
    err8.append(k(train, test, 8))
    err9.append(k(train, test, 9))
    err10.append(k(train, test, 10))






























# Plot each k's error trajectory across the 50 trials. The small +/-0.15
# x-offsets jitter overlapping series apart so they stay distinguishable.
z = np.arange(0, 50)
plt.plot(z - 0.15, err4, label='4')
plt.plot(z + 0.15, err5, label='5')
plt.plot(z - 0.15, err6, label='6')
plt.plot(z + 0.15, err7, label='7')
plt.plot(z - 0.15, err8, label='8')
plt.plot(z + 0.15, err9, label='9')
plt.plot(z - 0.15, err10, label='10')
plt.xlabel('trial')
plt.ylabel('mean error')
# BUG FIX: labels were attached to every series but plt.legend() was never
# called, so no legend was ever drawn on the saved figure.
plt.legend(title='n_clusters')

filename = os.path.expanduser("find_k")
plt.savefig(filename)
plt.show()  # no-op under the Agg backend; kept for interactive runs

