import asyncio
import pickle
import numpy as np
import faiss
import os
import json
import csv
import random
import pandas as pd
import time
import pymysql
import re
from typing import Dict, List, Optional
from random import shuffle

from pymilvus import connections, Collection, MilvusClient
from pymilvus.client.types import LoadState

import redis
from utils import get_gpt_ans,zhipu_API

import logging
logging.basicConfig(level=logging.ERROR, # log level: ERROR and above
                    format='%(asctime)s - %(levelname)s - %(message)s', # log line format
                    filename='error.log', # log file name
                    filemode='a') # 'a' = append to the existing file

def Singleton(cls):
    """Class decorator making every call to *cls* return one shared instance.

    The instance is created lazily on the first call; arguments of later
    calls are ignored (first-call-wins semantics).
    """
    instances = {}

    def wrapper(*args, **kwargs):
        try:
            return instances[cls]
        except KeyError:
            instances[cls] = cls(*args, **kwargs)
            return instances[cls]

    return wrapper

def serialize(obj):
    """Pickle *obj* into a bytes payload for storage in Redis."""
    payload = pickle.dumps(obj)
    return payload

def deserialize(data):
    """Unpickle *data* back into a Python object (inverse of serialize()).

    NOTE(review): pickle.loads must only ever see trusted bytes — here the
    payloads this process itself wrote to its own Redis.
    """
    return pickle.loads(data)

 # key is <user_id, group_code, labels>
class rKey:
    """Cache key identifying one (user, institution, tag-set) combination.

    Label order is irrelevant: equality and hashing treat ``labels`` as a
    set. ``repr()`` of this object is used verbatim as the Redis key.
    """

    def __init__(self, user_id, group_code, labels):
        self.user_id = user_id
        self.group_code = group_code
        self.labels = labels

    def __hash__(self):
        return hash((self.user_id, self.group_code, frozenset(self.labels)))

    def __eq__(self, other):
        # Fix: comparing against a non-rKey used to raise AttributeError;
        # returning NotImplemented lets Python fall back to its defaults.
        if not isinstance(other, rKey):
            return NotImplemented
        return (self.user_id == other.user_id
                and self.group_code == other.group_code
                and set(self.labels) == set(other.labels))

    def __repr__(self):
        # Fix: the previous frozenset repr depends on per-process hash
        # randomization, so the same logical key produced different Redis
        # keys across restarts. Sorted labels make the key deterministic
        # (old-format cache entries are simply orphaned).
        return f"Key({self.user_id}, {self.group_code}, {sorted(self.labels)})"
    
# value is a queue whose elements are <record_list, topic_name, topic_desc>
class rItem:
    """One queued recommendation: a list of record ids plus the generated
    book-list title (topic_name) and description (topic_desc).

    Instances are pickled into a Redis list; keep the attribute set stable.
    """

    def __init__(self, record_list, topic_name, topic_desc):
        self.record_list = record_list
        self.topic_name = topic_name
        self.topic_desc = topic_desc

    def __repr__(self):
        # Added for debuggability when items are logged or inspected.
        return (f"rItem(record_list={self.record_list!r}, "
                f"topic_name={self.topic_name!r}, topic_desc={self.topic_desc!r})")

#TODO: table updates still need maintenance later; "catagory_path":"./data/loan_book/带图书标签的数据/books_categories.csv"
# books_categories.db
# region
@Singleton
class BookRecommendationSystem:
    """Book-list recommendation service.

    Pipeline: retrieve nearest items for the user's embedding (Milvus),
    mix in random tag-matching books (MySQL tag tables), chunk the result
    into book lists, and attach an LLM-generated title/description. The
    prepared lists are cached per user in a Redis queue keyed by repr(rKey).
    """

    def __init__(self, data_config_path='./configs/config.json', capacity=5000):
        """
        Initialize the recommendation system.

        :param data_config_path: path of the JSON config file holding the
            MySQL and Milvus connection settings.
        :param capacity: nominal cache capacity (kept for compatibility;
            not referenced elsewhere in this class).
        """
        # NOTE(review): Redis credentials are hard-coded in source; move
        # them into the config file or environment variables.
        self.redis_client = redis.Redis(host='10.240.1.3', port=13296, password="white&is329tygpq26", db=2)
        self.capacity = capacity
        with open(data_config_path, 'r') as file:
            self.configs = json.load(file)
        self.api = zhipu_API()
        self.config_dir = os.path.dirname(data_config_path)
        self.data_config_dict = {}
        self.cursor = self._connect_sql(data_config_path)
        self.milvus_client = self._connect_milvus(data_config_path)
        # TODO: move the local config into MySQL and query it from there.

    def _connect_milvus(self, db_config_path):
        """Build a Milvus client from the ``milvusConnectConfig`` section of the config file."""
        with open(db_config_path, 'r') as file:
            db_config = json.load(file)
        return MilvusClient(
            uri=db_config["milvusConnectConfig"]["uri"],
            db_name=db_config["milvusConnectConfig"]["db_name"],
        )

    def _connect_sql(self, db_config_path):
        """Open a MySQL connection from ``dbConnectConfig`` and return its cursor."""
        with open(db_config_path, 'r') as file:
            db_config = json.load(file)
        conn = pymysql.connect(
            host=db_config["dbConnectConfig"]["host"],
            user=db_config["dbConnectConfig"]["user"],
            passwd=db_config["dbConnectConfig"]["passwd"],
            port=db_config["dbConnectConfig"]["port"],
            db=db_config["dbConnectConfig"]["db"],
            charset=db_config["dbConnectConfig"]["charset"]
        )
        return conn.cursor()

    def _get_group_config(self, group_code: str) -> Optional[Dict]:
        """
        Fetch a single group's configuration record.

        :param group_code: institution/group code.
        :return: dict mapping column name -> value, or None when no row exists.
        """
        sql = "SELECT * FROM group_config WHERE group_code = %s"
        self.cursor.execute(sql, (group_code,))
        result = self.cursor.fetchone()
        if result:
            columns = [col[0] for col in self.cursor.description]
            return dict(zip(columns, result))
        return None

    def _get_user_embedding(self, user_id, configs):
        """
        Look up the user's embedding vector via its Milvus index.

        :param user_id: USER_ID in the ``{group}_all_user_data`` table.
        :param configs: group config row (must contain ``group_name``).
        :return: numpy array of shape (1, dim).
        :raises ValueError: when the user has no embedding index.
        """
        groupName = configs['group_name']
        sql = f"SELECT `embedding_index` FROM {groupName}_all_user_data WHERE USER_ID = %s"
        self.cursor.execute(sql, (user_id,))
        result = self.cursor.fetchone()
        logging.info(f"userID: {user_id}")
        logging.info(f"get user embedding result: {result}")
        # Fix: fetchone() returns None for unknown users; fail with a clear
        # message instead of an opaque TypeError on subscripting.
        if result is None:
            raise ValueError(f"no embedding index found for user {user_id}")
        user_index = result[0]
        if self.milvus_client.get_load_state(collection_name=f"{groupName}_user")['state'] == LoadState.NotLoad:
            self.milvus_client.load_collection(collection_name=f"{groupName}_user")
        embedding = self.milvus_client.get(
            collection_name=f"{groupName}_user",
            ids=[user_index],
            output_fields=["embedding"]
        )[0]["embedding"]
        return np.array(embedding).reshape(1, -1)

    def _search_items(self, user_embedding, configs, k=20):
        """
        ANN-search the item collection and map the top-k ITEM_IDs to RECORD_IDs.

        :param user_embedding: (1, dim) query vector.
        :param configs: group config row (must contain ``group_name``).
        :param k: number of nearest items to retrieve.
        :return: list of RECORD_IDs (possibly shorter than k, possibly empty).
        """
        groupName = configs['group_name']
        if self.milvus_client.get_load_state(collection_name=f"{groupName}_item")['state'] == LoadState.NotLoad:
            self.milvus_client.load_collection(collection_name=f"{groupName}_item")
        res_dict = self.milvus_client.search(
            collection_name=f"{groupName}_item",
            anns_field="embedding",
            data=user_embedding,
            limit=k,
            output_fields=["ITEM_ID"]
        )
        logging.info(f"search result: {res_dict}")
        limit = min(k, len(res_dict[0]))
        # Fix: the loop previously started at index 1, silently dropping the
        # closest (best-ranked) hit.
        item_ids = [res_dict[0][i]["entity"]["ITEM_ID"] for i in range(limit)]
        logging.info(f"search result: {item_ids}")
        # Fix: an empty id list would render an invalid "IN ()" clause.
        if not item_ids:
            return []
        # Map ITEM_ID -> RECORD_ID through the per-group item table
        # (pymysql expands a list parameter for "IN %s").
        sql_select = f"SELECT RECORD_ID FROM {groupName}_all_item_data WHERE ITEM_ID IN %s"
        self.cursor.execute(sql_select, (item_ids,))
        results = self.cursor.fetchall()
        return [record[0] for record in results]

    def _add_random_books(self, record_ids, labels, configs, k=10, min_year=0):
        """
        Prepend up to *k* random tag-matching books (published >= *min_year*)
        that are not already in *record_ids*.

        :param record_ids: recommendation results to extend (not mutated).
        :param labels: tags the user is interested in.
        :param configs: group config row (must contain ``group_name``).
        :return: new list ``random_books + record_ids``.
        """
        groupName = configs['group_name']
        available_record_ids = set()
        for label in labels:
            try:
                # Fix: parameterized query — the previous f-string
                # interpolation of user-supplied labels was SQL-injectable.
                self.cursor.execute(f"SELECT idList FROM {groupName}_tag2recordId WHERE tag = %s;", (label,))
                results = self.cursor.fetchall()
            except Exception as e:
                logging.error(f"Error querying tag '{label}': {e}")
                continue
            # Fix: unknown tags used to raise IndexError on results[0][0].
            if not results:
                continue
            idList = results[0][0]
            # idList is a '|'-separated string of record ids, possibly with
            # trailing separators.
            while idList.endswith('|'):
                idList = idList[:-1]
            if idList:
                available_record_ids.update(map(int, idList.split('|')))

        available_record_ids -= set(record_ids)
        available_record_ids = list(available_record_ids)
        newer_record_ids = []

        # Batched lookups keep the IN (...) clause bounded.
        batch_size = 1000
        for i in range(0, len(available_record_ids), batch_size):
            batch_ids = available_record_ids[i:i+batch_size]
            placeholders = ', '.join(['%s'] * len(batch_ids))
            sql = f"SELECT RECORD_ID FROM {groupName}_book_attr WHERE RECORD_ID IN ({placeholders}) AND PUB_YEAR >= %s;"
            try:
                self.cursor.execute(sql, tuple(batch_ids) + (min_year,))
                newer_record_ids.extend([result[0] for result in self.cursor.fetchall()])
            except Exception as e:
                logging.error(f"Error querying newer books: {e}")

        # Fix: sample at most the available amount — random.sample raises
        # ValueError when k exceeds the population size.
        selected_record_ids = random.sample(newer_record_ids, min(k, len(newer_record_ids)))
        return list(selected_record_ids) + record_ids

    async def get(self, key: rKey):
        """
        Pop one prepared book list for *key* from Redis, refilling the queue
        when it is nearly empty (first item synchronously, the rest via a
        background task).

        :param key: rKey whose repr() is the Redis list key.
        :return: an rItem, or None when nothing is available.
        """
        redis_key = key.__repr__()
        if not self.redis_client.exists(redis_key):
            logging.warning(f"key not in redis, key={key.user_id},{key.group_code},{key.labels}")
        user_id = key.user_id
        group_code = key.group_code
        labels = key.labels
        queue_len = self.redis_client.llen(redis_key)
        # NOTE(review): this may be None for an unknown group_code; the
        # try/except below then logs the resulting failure.
        institution_configs = self._get_group_config(group_code=group_code)
        if queue_len <= 1:
            # Queue (nearly) exhausted: regenerate recommendation lists.
            try:
                user_embedding = self._get_user_embedding(user_id, configs=institution_configs)
                recommend_record_ids = self._search_items(user_embedding, configs=institution_configs, k=500)
                recommend_record_ids = random.sample(recommend_record_ids, min(100, len(recommend_record_ids)))
                record_ids = self._add_random_books(recommend_record_ids, labels, configs=institution_configs, k=100, min_year=2010)
                shuffle(record_ids)

                # Chunk into at most 5 lists of 30 books each.
                k = 30
                record_lists = [record_ids[i:i+k] for i in range(0, min(len(record_ids), k*5), k)]

                start_i = 0
                if queue_len == 0 and record_lists:
                    # Queue empty: produce the first list synchronously so the
                    # caller gets a result right away.
                    start_i += 1
                    topic_name, topic_desc = await self.generate_topic_name(record_lists[0], configs=institution_configs)
                    self.redis_client.rpush(redis_key, serialize(rItem(record_lists[0], topic_name, topic_desc)))

                # Generate the remaining lists in the background.
                asyncio.ensure_future(self.process_record_lists(record_lists, start_i, institution_configs, redis_key))

            except Exception as e:
                logging.error("发生错误：", exc_info=True)  # exc_info=True records the traceback

        # Fix: lpop returns None on an empty queue; this used to crash inside
        # pickle.loads(None).
        payload = self.redis_client.lpop(redis_key)
        return deserialize(payload) if payload is not None else None

    async def generate_topic_name(self, record_ids, configs):
        """
        Ask the LLM for a book-list title and description for *record_ids*.

        :param record_ids: RECORD_IDs whose titles seed the prompt.
        :param configs: group config row (must contain ``group_name``).
        :return: (topic_name, topic_desc); empty strings when the reply does
            not contain the expected markers.
        """
        groupName = configs['group_name']
        sql_select = f"SELECT TITLE_S FROM {groupName}_book_attr WHERE RECORD_ID IN %s"
        self.cursor.execute(sql_select, (record_ids,))
        results = self.cursor.fetchall()
        book_infos = [record[0] for record in results]
        prompt = f"根据以下信息，请生成一个个性化的有针对性的且潮流的10个字以内的中文'书单名'与50个字以内的'书单简介'。\
                    - 书单包含以下书籍：{book_infos}\
                    请确保生成的文本中有书单名和简介,并且书单名和简介够反映这些书籍的共同主题或特色。\
                    "
        result = zhipu_API().chat_with_messages(prompt)

        # TODO: LLM output is not robust; the marker-based parsing below
        # needs a sturdier format (e.g. ask for JSON).
        # Fix: default both fields so a malformed reply cannot raise
        # UnboundLocalError.
        topic_name, topic_desc = "", ""
        for line in result.split('\n'):
            if '书单名' in line:
                topic_name = line.split('书单名：')[-1].strip()
                topic_name = re.sub(r'\*+', '', topic_name)
            elif '简介' in line:
                topic_desc = line.split('简介：')[-1].strip()
                topic_desc = re.sub(r'\*+', '', topic_desc)

        return topic_name, topic_desc

    async def process_record_lists(self, record_lists, start_i, configs, redis_key):
        """Generate title/description for the remaining lists and push them onto the Redis queue."""
        # Fix: predefine so the except-clause log cannot hit an unbound name.
        record_list = None
        try:
            for record_list in record_lists[start_i:]:
                topic_name, topic_desc = await self.generate_topic_name(record_list, configs=configs)
                self.redis_client.rpush(redis_key, serialize(rItem(record_list, topic_name, topic_desc)))
        except Exception as e:
            logging.error(f"Error processing record {record_list}: {e}", exc_info=True)

    async def generate_book_list(self, user_id, group_code, labels):
        """
        Generate one book-list recommendation.

        :param user_id: user ID
        :param group_code: institution code
        :param labels: list of tags the user is interested in
        :return: dict with keys recordIds/topicName/topicDesc, or None when
            nothing could be generated.
        """
        try:
            key = rKey(user_id, group_code, labels)
            result = await self.get(key)
            # Fix: get() may return None; previously this fell into the
            # except-clause via AttributeError.
            if result is None:
                return None
            return {
                'recordIds': result.record_list,
                'topicName': result.topic_name,
                'topicDesc': result.topic_desc
            }
        except Exception as e:
            logging.error("发生错误：", exc_info=True)  # exc_info=True records the traceback

# Sample request payload used by the smoke test in main() below.
test_data = {"inf_enc":"bd6604cd23fcbaea331c2c430e9cd3ef","userId":9,"token":"279D4B5D5F2FD5FE","labels":["诗歌","言情","心理","经典","日本漫画","文学","文化","历史","爱情","诗词","儿童文学","中国文学","名著","推理小说","互联网","程序","人物传记","杂文"],"timestamp":1733111357088,"groupCode":"200300"}
# data_config_path = './configs/config.json'  
db_config_path = './configs/db_configs.json'
# with open(file_path, 'r') as file:  
#     data_config = json.load(file)

# Module-level singleton; connects to Redis/MySQL/Milvus at import time.
recommendation_system = BookRecommendationSystem(data_config_path=db_config_path)
# # recommendation = recommendation_system.generate_book_list(user_id=76, group_code='200572.0', labels=['经济', '历史'])

async def main():
    """Smoke test: request one recommendation for the sample payload and print it."""
    result = await recommendation_system.generate_book_list(
        user_id=test_data['userId'],
        group_code=test_data['groupCode'],
        labels=test_data['labels'],
    )
    print("-------------------------")
    print(result)

if __name__ == "__main__":
    asyncio.run(main())