import asyncio
import difflib
import functools
import json
import sys
import threading
import time
import traceback
from collections import defaultdict
from concurrent import futures

import grpc
import jieba
import mysql.connector
import numpy as np
import pandas as pd
# from mysql.connector.pooling import PooledMySQLConnection
from mysql.connector import pooling
from sklearn import feature_extraction
from sklearn.cluster import KMeans
# from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

import message_pb2
import message_pb2_grpc

# Thread-synchronization decorator.
def synchronized(func):
    """Serialize all calls to *func* with a per-function lock.

    Each decorated function gets its own ``threading.Lock``; concurrent
    callers block until the previous call releases it.

    :param func: the callable to protect
    :return: the thread-safe wrapper
    """
    func.__lock__ = threading.Lock()

    # functools.wraps preserves the wrapped function's name/docstring,
    # which the original version lost (everything showed as 'lock_func').
    @functools.wraps(func)
    def lock_func(*args, **kwargs):
        # Hold the lock for the full duration of the wrapped call.
        with func.__lock__:
            return func(*args, **kwargs)

    return lock_func

class MySQL_Conn_Pool:
    """Thread-safe singleton wrapper around a mysql.connector connection pool."""

    _instance = None  # class attribute holding the singleton instance

    # Singleton constructor; @synchronized guards against races when two
    # threads create the pool at the same time.
    @synchronized
    def __new__(cls, size, dbconfig):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, size, dbconfig):
        # BUG FIX: __init__ runs on EVERY MySQL_Conn_Pool(...) call, even when
        # __new__ just returned the existing singleton — the original code
        # silently rebuilt and replaced the shared pool each time. Only create
        # the pool on first initialization.
        if not hasattr(self, "_pool"):
            self._pool = self.__init_db_pool(size, dbconfig)

    # Build the underlying mysql.connector pool.
    def __init_db_pool(self, size, dbconfig):
        """Create and return a MySQLConnectionPool of the given size."""
        connection_pool = pooling.MySQLConnectionPool(
            pool_name="mypool",
            pool_size=size,  # pool size
            **dbconfig
        )
        return connection_pool

    # Check a connection out of the pool.
    def get_connection(self):
        """Return a pooled connection; caller must close() it to return it."""
        return self._pool.get_connection()

# gRPC service class.
class Service(message_pb2_grpc.RecmdServiceServicer):
    """Recommendation servicer.

    Combines two strategies over data read from MySQL:
      * content-based: TF-IDF over jieba-segmented food names + KMeans grouping;
      * user-based collaborative filtering: cosine similarity over a
        user x food rating matrix built with pandas.
    """
    # Thread pool (currently unused).
    # _threadPool = ThreadPoolExecutor(max_workers=4, thread_name_prefix="thread_")

    # MySQL connection pool shared by all requests.
    # NOTE(review): credentials are hard-coded — move to config/env.
    dbconfig = {
        "host": "localhost",
        "user": "root",
        "password": "123456",
        "database": "myasio"
    }
    _pool = MySQL_Conn_Pool(5, dbconfig)

    async def GetRecmdList(self, request, context):
        """Server-streaming RPC: yield one GetRecmdRsp per recommended food id.

        On any failure a single response with foodId=0 is yielded instead.
        """
        try:
            user_id = request.userId
            shop_id = request.shopId
            res = self._task(self._pool, user_id, shop_id)
            print('最终推荐结果：', res)
            # Stream one response per recommended food id.
            for item in res:
                response = message_pb2.GetRecmdRsp(
                    foodId=item
                )
                # NOTE(review): `res` is non-empty whenever this loop body runs,
                # so the else branch below is unreachable dead code.
                if (res):
                    yield response
                else:
                    response = message_pb2.GetRecmdRsp(
                        foodId=0
                    )
                    yield response
        except Exception as e:
            # Fall back to a single "no recommendation" response.
            response = message_pb2.GetRecmdRsp(
                foodId=0
            )
            print('GetRecmdList error: ', e)
            yield response

    def _prepare_data(self, pool, user_id, shop_id):
        """Load purchase and rating history from MySQL.

        Returns a 4-tuple:
            _dict_food:          {user_id(str): [food_id, ...]} per-user purchases
            _dict_food_rating:   {user_id(str): [rating, ...]} parallel ratings
            other_shop_food_list: food ids ordered from shops other than shop_id
            _list_food_id:       every food_id in the `foods` table
        """
        _list_tmp_food = []
        _list_tmp_rating = []
        _dict_food = {}
        _dict_food_rating = {}
        _list_food_id = []
        other_shop_food_list = []

        conn = pool.get_connection()
        # Fetch the full food id list.
        cursor = conn.cursor(prepared=True)
        select_query = "SELECT food_id FROM foods"
        cursor.execute(select_query)
        rows = cursor.fetchall()
        for item in rows:
            _list_food_id.append(item[0])

        cursor1 = conn.cursor(prepared=True)
        # Prepared statement for all orders.
        select_query = "SELECT order_id, user_id, shop_id, shop_list FROM orders"
        # Execute the query.
        # cursor1.execute(select_query, (user_id,))
        cursor1.execute(select_query)
        rows = cursor1.fetchall()

        cursor2 = conn.cursor(prepared=True)
        for row in rows:
            if row[2] != shop_id:  # Orders from other shops are still collected,
                                   # so their foods can be suggested to the shop owner.
                json_data = json.loads(row[3])
                for food in json_data:
                    if food.get('food_id') is not None:
                        other_shop_food_list.append(food['food_id'])
            #     continue
            # Ratings for every item in this order.
            select_query = "SELECT food_id, ratings FROM food_ratings WHERE order_id=%s"
            cursor2.execute(select_query, (row[0],))
            rows2 = cursor2.fetchall()

            for row2 in rows2:
                _list_tmp_food.append(row2[0])
                _list_tmp_rating.append(row2[1])

            # Accumulate per-user mappings, keyed by user id as a string.
            tmp = str(row[1])
            if (_dict_food.get(tmp) == None):
                _dict_food[tmp] = _list_tmp_food[:]  # slice copy, not a shared reference
            else:
                _dict_food[tmp].extend(_list_tmp_food)

            if (_dict_food_rating.get(tmp) == None):
                _dict_food_rating[tmp] = _list_tmp_rating[:]  # slice copy, not a shared reference
            else:
                _dict_food_rating[tmp].extend(_list_tmp_rating)

            _list_tmp_food.clear()
            _list_tmp_rating.clear()

        # Sort both dicts by key so ordering is deterministic.
        _dict_food = dict(sorted(_dict_food.items()))
        _dict_food_rating = dict(sorted(_dict_food_rating.items()))

        # NOTE(review): both debug prints index user "1" directly and raise
        # KeyError when that user has no history — confirm "1" always exists.
        print('_dict_food: ', _dict_food, ', \"1\"\'s size is: ', len(_dict_food['1']))
        print('_dict_food_rating: ', _dict_food_rating, ', \"1\"\'s size is: ', len(_dict_food_rating['1']))

        conn.close()
        return (_dict_food, _dict_food_rating, other_shop_food_list, _list_food_id)

    # Content-based recommendation: compute TF-IDF over food names and pick
    # items similar to the user's most-purchased foods.
    def _base_content_predict(self, pool, food_id_list, dict_food, user_id, shop_id_1):
        """Recommend foods from shop_id_1 whose names resemble the user's
        two most frequently purchased foods.

        Returns a list of food ids padded with 0s to at least length 2.
        """
        res_list = list()
        food_name_list = list()
        food_name_comp_list = list()
        food_name_word_list = list()
        food_foodId_name_dict = dict()
        food_name_foodId_dict = dict()
        food_name_shop_id_dict = dict()
        # food_list = list(dict_food[str(user_id)])
        conn = pool.get_connection()
        # Build name<->id<->shop lookup tables for every food.
        for item in food_id_list:
            select_query = "SELECT name, shop_id FROM foods WHERE food_id=%s"
            cursor = conn.cursor(prepared=True)
            cursor.execute(select_query, (item,))
            rows = cursor.fetchall()
            for row in rows:
                food_name_list.append(row[0])
                food_foodId_name_dict[str(item)] = row[0]
                food_name_foodId_dict[row[0]] = item
                food_name_shop_id_dict[row[0]] = row[1]

        tmp_str = ""
        last = 0
        # len_list = len(food_name_list)
        # for i in food_name_list:
            # food_name_word_list.append(list(jieba.cut(i)))
            # food_name_word_list.append(" ".join(jieba.cut(i)))
            # tmp_str += i
            # if i%20 == 0:
            #     if food_name_list[last:i] == "":
            #         continue
            #     food_name_comp_list.append(tmp_str.join(food_name_list[last:i]))
            #     tmp_str = ""
            #     last = i
            # else:
            #     if i == len_list-1:
            #         if food_name_list[last:i] == "":
            #             continue
            #         food_name_comp_list.append(tmp_str.join(food_name_list[last:i]))
            #         tmp_str = ""

        # Segment each food name with jieba, space-joined for the vectorizer.
        # food_name_word_list = list(jieba.cut(tmp_str))
        # tmp_str = ""
        # for i in food_name_word_list:
        #     tmp_str += i+" "
        # food_name_word_list = [tmp_str]
        food_name_word_list = [" ".join(jieba.cut(t)) for t in food_name_list]
        print("food_name_word_list: ", food_name_word_list)
        # Compute the TF-IDF matrix.
        vectorizer = TfidfVectorizer()
        # tfidf_matrix = vectorizer.fit_transform(food_name_list)
        # vectorizer = CountVectorizer()
        # vectorizer = TfidfVectorizer()
        v_res = vectorizer.fit_transform(food_name_word_list)
        print("v_res.toarray()", v_res.toarray())
        transformer = TfidfTransformer()
        tfidf_matrix = transformer.fit_transform(v_res)
        # Convert the TF-IDF matrix to a DataFrame (for inspection only).
        # tfidf_df = pd.DataFrame(tfidf_matrix.toarray(), columns=vectorizer.get_feature_names_out(), index=food_name_list)
        print("TF-IDF 矩阵：")
        # print(tfidf_df)

        # Find the foods each user bought most often.
        dict_weighted = {}      # per-user purchase-frequency weights
        # Frequency counting; the purchase count acts as the weight.
        tmp = {}  # scratch dict, cleared per user
        for key in dict_food.keys():
            for food_id in dict_food[key]:
                tmp_str = str(food_id)
                tmp[tmp_str] = tmp.get(tmp_str, 0) + 1
            # Sort by count, descending.
            res = dict(sorted(tmp.items(), key=lambda d: d[1], reverse=True))
            # Copy tmp before it is cleared below.
            dict_weighted[key] = res.copy()
            tmp.clear()

        print('_dict_weighted is: ', dict_weighted)
        # Take the user's two most frequently bought foods.
        # top_list = [int(x) for x in dict_weighted[str(user_id)].keys()]
        # NOTE(review): .get() returns None for an unknown user and the next
        # line then raises AttributeError — confirm callers guarantee history.
        top_list = [int(x) for x in dict_weighted.get(str(user_id)).keys()]
        top_list = top_list[0:2]
        print('top_list is: ', top_list)

        # Collect recommendations for each top food.
        for food_id in top_list:
            tmp_name = food_foodId_name_dict[str(food_id)]
            # recommend_res = self.recommend_items(tmp_name, food_name_list, food_name_word_list, vectorizer, tfidf_matrix)
            recommend_res = self.recommend_items2(tmp_name, food_name_foodId_dict, food_name_word_list, v_res)
            print('recomd_res:    ------', recommend_res)
            if len(recommend_res) != 0:
                # res_list.append(food_name_foodId_dict[recommend_res[0][0]])
                # res_list.append(food_name_foodId_dict[str(recommend_res[0])])
                res_list.append(recommend_res[0])
        # Keep only foods that belong to this shop.
        tmm_list = list()
        for food_id in res_list:
            if food_name_shop_id_dict[food_foodId_name_dict[str(food_id)]] == shop_id_1:
                tmm_list.append(food_id)

        # Pad with 0 (sentinel: no recommendation) up to two entries.
        while len(tmm_list) < 2:
            tmm_list.append(0)
        conn.close()
        return tmm_list

    def recommend_items(self, input_name, item_names, item_names_comp, vectorizer, tfidf_matrix, top_n=3):
        """Recommend items whose names are similar to *input_name*.

        :param input_name: name of the query item
        :param item_names: list of all item names
        :param item_names_comp: document texts to locate input_name in
        :param vectorizer: fitted vectorizer providing the vocabulary
        :param tfidf_matrix: TF-IDF matrix over item_names_comp
        :param top_n: requested number of recommendations (currently unused)
        :return: list of recommended item names
        """
        # Vectorize the input name.
        input_vector = vectorizer.transform([input_name])

        # Cosine similarity against all items (disabled approach).
        # similarities = cosine_similarity(input_vector, tfidf_matrix).flatten()

        # Instead: take the highest-weighted terms of the matching document.
        words = vectorizer.get_feature_names_out()       # vocabulary of the bag-of-words model
        weight = tfidf_matrix.toarray()  # weight[i][j] is the TF-IDF of term j in document i
        idx = 0
        tmp_list = list()
        # Locate which document contains input_name.
        for index in range(len(item_names_comp)):
            if input_name in item_names_comp[index]:
                idx = index
                break
        # Sort that document's terms by TF-IDF weight, descending.
        tmp_list = weight[idx]
        sorted_indices = np.argsort(-tmp_list)
        sorted_weights = weight[idx][sorted_indices]
        sorted_words = words[sorted_indices]


        # Keep the top three terms.
        tmp_weights_list = sorted_weights[:3]
        tmp_words_list = sorted_words[:3]
        # Rank all item names by how many of those terms they contain.
        recommended_items = self._find_top_matches(item_names, tmp_words_list)


        # top_indices = similarities.argsort()[-top_n:][::-1]
        # # recommended_items = [(item_names[i], similarities[i]) for i in top_indices]
        # recommended_items = [item_names[i] for i in top_indices]
        return recommended_items

    def recommend_items2(self, input_name, food_name_foodId_dict, texts, v_res, n_clusters=3):
        """Cluster food names with KMeans; from each cluster sharing a token
        with *input_name*, recommend the most string-similar other item.

        :param input_name: name of the query food
        :param food_name_foodId_dict: maps food name -> food id
        :param texts: jieba-segmented (space-joined) food names, row-aligned with v_res
        :param v_res: TF-IDF/count matrix over texts
        :param n_clusters: number of KMeans clusters
        :return: list of recommended food ids (possibly empty)
        """
        res = list()
        # Group the vectors with KMeans.
        # NOTE(review): no random_state is set, so cluster assignment — and
        # therefore the recommendation — is not deterministic across runs.
        kmeans = KMeans(n_clusters=n_clusters)
        kmeans.fit(v_res)

        # Collect the segmented texts per cluster label.
        clusters = defaultdict(list)
        for i, label in enumerate(kmeans.labels_):
            clusters[label].append(texts[i])
        tmp_res = list(clusters.values())

        # Per cluster: names with spaces removed (group_list) and the raw
        # token lists used for membership tests (tmp_find_list).
        group_list = list()
        tmp_group_list = list()
        tmp_find_list = list()
        tmp_abc_list = list()
        for index in range(len(tmp_res)):
            for idx in range(len(tmp_res[index])):
                if tmp_res[index][idx] != "":
                    tmp_group_list.append(tmp_res[index][idx].replace(" ", ""))
                    tmp_abc_list.extend(tmp_res[index][idx].split(" "))
            tmp_find_list.append(tmp_abc_list)
            group_list.append(tmp_group_list)
            tmp_abc_list = []
            tmp_group_list = []

        # Find which cluster(s) share a token with input_name.
        idx = 0
        name_in_group = list()
        for item in tmp_find_list:
            for it in item:
                if it in input_name:
                    name_in_group.append(idx)
            idx += 1
        # NOTE(review): after the loop idx always equals len(tmp_find_list),
        # and idx is not read again below — this branch looks like dead code.
        if idx == len(tmp_find_list):
            idx = len(tmp_find_list)-1

        # Within each matching cluster pick the most similar name; the best
        # match is the input itself, so the second-best is recommended.
        for index in name_in_group:
            sim_list = []
            for item in group_list[index]:
                sim_list.append(difflib.SequenceMatcher(None, item, input_name).quick_ratio())
            sorted_pairs = sorted(((val, idx) for idx, val in enumerate(sim_list)), reverse=True)
            sorted_indices = [idx for (val, idx) in sorted_pairs]

            # Take the second entry: the first is always the input itself.
            if len(sorted_indices) > 1:
                res.append(food_name_foodId_dict[group_list[index][sorted_indices[1]]])
            # res.append(food_name_foodId_dict[group_list[index][0]])

        # tmp_res_list = list()
        # for item in tmp_res:
        #     for it in item:
        #         if it in input_name:
        #             tmp_res_list.append(item)
        # # look up matches in item_names

        return res


    def _find_top_matches(self, target_strings, match_terms, min_results=3, return_scores=False):
        """Find the strings matching the most terms (at least min_results of them).

        Parameters:
            target_strings: strings to search (required)
            match_terms: keywords to match (required)
            min_results: minimum number of results to return (default 3)
            return_scores: whether to include match counts (default False)

        Returns:
            if return_scores=True: a list of (string, match_count) tuples
            if return_scores=False: a plain list of strings
        """
        # if not target_strings or not match_terms:
        #     return [] if return_scores else []

        # Count how many terms appear in each string.
        scored_items = []
        for s in target_strings:
            score = sum(1 for term in match_terms if term in s)
            scored_items.append((s, score))

        # Sort by match count, descending.
        scored_items.sort(key=lambda x: x[1], reverse=True)

        # Handle the case of too few candidates.
        if len(scored_items) < min_results:
            # Return everything available (possibly fewer than min_results).
            valid_results = scored_items
        else:
            # Include ties at the min_results-th score.
            min_score = scored_items[min_results-1][1]
            valid_results = [item for item in scored_items if item[1] >= min_score]

        # Choose the return format.
        if return_scores:
            return valid_results
        else:
            return [item[0] for item in valid_results]

    def _get_matrixs(self, dict_food, dict_food_rating):
        """Build the user-item rating matrix and the user-user similarity matrix.

        Returns (user_item_matrix, user_similarity_df), both pandas DataFrames
        indexed by user id.
        """
        user_order_list = []
        list_food = []
        list_food_rating = []

        # Flatten the dicts into parallel user/food/rating lists.
        for key in dict_food.keys():
            for id, rating in zip(dict_food[key], dict_food_rating[key]):
                user_order_list.append(int(key))  # record the buying user as well
                list_food.append(int(id))
                list_food_rating.append(rating)

        # The three parallel lists feed the similarity analysis below.
        print('_user_order_list is: ', user_order_list)
        print('_list_food is: ', list_food)
        print('_list_food_rating is: ', list_food_rating)

        # Similarity analysis:
        # user x food rating table.
        data = {
            'user_id': user_order_list,
            'food_id': list_food,
            'rating': list_food_rating
        }
        # Create the DataFrame.
        df = pd.DataFrame(data)
        # Pivot into a user x food matrix; unrated cells become 0.
        user_item_matrix = df.pivot_table(index='user_id', columns='food_id', values='rating').fillna(0)
        # Cosine similarity between user rating vectors
        # (the original comment said "Euclidean distance", but the helper
        # below computes cosine similarity).
        user_similarity = self._cosine_similarity(user_item_matrix.values)
        # Wrap the similarity matrix in a DataFrame indexed by user id.
        user_similarity_df = pd.DataFrame(user_similarity, index=user_item_matrix.index,
                                          columns=user_item_matrix.index)

        return (user_item_matrix, user_similarity_df)

    # User-based collaborative filtering.
    def _base_user_predict(self, pool, food_id_list, user_item_matrix, user_similarity_df, user_id, shop_id):
        """Predict ratings for every food, keep the top 4, then restrict the
        result to foods sold by shop_id (padded with 0s to length >= 2)."""
        tmp_dict = {}
        # Foods nobody ever bought or rated predict to 0 inside
        # _predict_rating and therefore sort last.
        for id in food_id_list:
            tmp_res = self._predict_rating(user_id, id, user_similarity_df, user_item_matrix)
            tmp_dict[str(id)] = tmp_res

        # Sort by predicted rating, descending.
        res_dict = dict(sorted(tmp_dict.items(), key=lambda d: d[1], reverse=True))
        print('预测所有商品推荐结果：', res_dict)
        res = []
        i = 0
        for item in res_dict.items():
            if (i < 4):  # recommend at most 4 foods
                res.append(int(item[0]))
            i += 1


        # Keep only foods that belong to this shop.
        conn = pool.get_connection()
        cursor = conn.cursor(prepared=True)
        select_query = "SELECT food_id FROM foods WHERE shop_id=%s"
        cursor.execute(select_query, (shop_id,))
        rows = cursor.fetchall()
        tmp_res2 = list()
        i = 0
        for food_id in rows:
            for food_id2 in res:
                if food_id[0] == food_id2:
                    tmp_res2.append(food_id2)

        conn.close()
        # Pad with 0 (sentinel: no recommendation) up to two entries.
        while len(tmp_res2) < 2:
            tmp_res2.append(0)
        print("res is: ", tmp_res2)
        return tmp_res2

    def _task(self, pool, user_id, shop_id):
        """Run the full recommendation pipeline for (user_id, shop_id).

        Returns user-based picks + content-based picks + cross-shop picks,
        in that order; [] on any failure.
        """
        # user -> purchased-food-list dict
        _dict_food = {}
        # purchased-food list (unused here)
        _list_food = []
        # user -> ratings dict
        _dict_food_rating = {}
        # ratings list (unused here)
        _list_food_rating = []
        # foods from other shops the user may like
        _base_user_other_list = []

        # NOTE(review): this connection is never used for queries — the
        # helpers each check out their own; it is only closed below.
        conn = pool.get_connection()

        try:
            # Prepare the data.
            _dict_food, _dict_food_rating, other_shop_food_list, food_id_list = self._prepare_data(pool, user_id, shop_id)

            # Content-based recommendations.
            _base_content_list = self._base_content_predict(pool, food_id_list, _dict_food, user_id, shop_id)

            _user_item_matrix, _user_similarity_df = self._get_matrixs(_dict_food, _dict_food_rating)
            print("_user_similarity_df: ", _user_similarity_df)

            # Foods in this shop the user may be interested in.
            _base_user_list = self._base_user_predict(pool, food_id_list, _user_item_matrix, _user_similarity_df, user_id, shop_id)

            # Foods in other shops the user may be interested in.
            tmp_dict = {}
            # Foods without ratings predict to 0 and naturally sort last.
            for food_id in other_shop_food_list:
                tmp_res = self._predict_rating(user_id, food_id, _user_similarity_df, _user_item_matrix)
                tmp_dict[food_id] = tmp_res
            # Keep the top two.
            tmp_dict = dict(sorted(tmp_dict.items(), key=lambda d: d[1], reverse=True))
            _base_user_other_list = list(tmp_dict.keys())[0:2]

            res = []
            # Order: user-based first, then content-based — users tend to
            # prefer what they have bought most.
            res += _base_user_list
            res += _base_content_list
            # Cross-shop foods the shop owner could consider stocking.
            res += _base_user_other_list

            # Return the connection.
            conn.close()
            return res
        except Exception as e:
            print('task failed: ', traceback.format_exc())
            conn.close()
            return []

    # Similarity between users (cosine similarity over matrix rows).
    def _cosine_similarity(self, matrix):
        """Return the row-by-row cosine similarity matrix.

        NOTE(review): a zero row (user with no ratings) gives a zero norm and
        a division by zero here — confirm such rows cannot occur upstream.
        """
        # Normalize each row to unit length.
        norm = np.linalg.norm(matrix, axis=1)
        matrix_norm = matrix / norm[:, np.newaxis]

        # Dot product of unit rows == cosine similarity.
        similarity = np.dot(matrix_norm, matrix_norm.T)
        return similarity

    # Predict a user's rating for an item they have not rated.
    def _predict_rating(self, user_id, item_id, user_similarity_df, user_item_matrix):
        """Similarity-weighted average of other users' ratings for item_id;
        0 when the item is unknown or has no ratings."""
        # Similarities between the target user and every user.
        user_sim = user_similarity_df[user_id]

        # Unknown item (no column — nobody ever bought it): predict 0.
        if user_item_matrix.get(item_id) is None:
            return 0
        # Ratings every user gave this item.
        item_ratings = user_item_matrix[item_id]

        # Users who actually rated the item.
        rated_users = item_ratings[item_ratings > 0].index

        # Nobody rated it: predict 0.
        if len(rated_users) == 0:
            return 0

        # Similarity-weighted average rating.
        weighted_sum = np.dot(user_sim[rated_users], item_ratings[rated_users])
        sum_of_weights = np.abs(user_sim[rated_users]).sum()

        if sum_of_weights == 0:
            return 0
        return weighted_sum / sum_of_weights

async def service():
    """Start the async gRPC recommendation server on port 65432 and block
    until it terminates; on cancellation, stop with a 1-second grace period."""
    grpc_server = grpc.aio.server()
    message_pb2_grpc.add_RecmdServiceServicer_to_server(Service(), grpc_server)
    grpc_server.add_insecure_port('[::]:65432')
    await grpc_server.start()
    print("Server started on port 65432...")
    try:
        # Keep this coroutine alive for as long as the server runs.
        await grpc_server.wait_for_termination()
    except asyncio.CancelledError:
        print("Server is shutting down...")
        # Give in-flight RPCs one second to finish before stopping.
        await grpc_server.stop(grace=1)

# Word-frequency counter (placeholder — not implemented yet).
def num_count(marx):
    """Stub for a future word-frequency statistic; currently only prints
    a marker and returns None. The *marx* argument is unused."""
    print('abc')


if __name__ == "__main__":
    # Entry point: run the async gRPC service until interrupted.
    try:
        asyncio.run(service())
    except KeyboardInterrupt:
        # Ctrl-C: report the shutdown and terminate the process.
        print("Server stopped.")
        exit()