import numpy as np
import faiss
import os
import json
import csv
import random
import pandas as pd
import time
import pymysql

from utils import get_gpt_ans
# TODO: the category tables still need an ongoing update/maintenance process;
# "catagory_path": "./data/loan_book/带图书标签的数据/books_categories.csv"
# books_categories.db
# region
class BookRecommendationSystem:
    """Book-list recommendation system backed by Faiss embeddings and MySQL.

    For each institution ("group") described in the top-level JSON config, the
    system loads precomputed user embeddings, a Faiss item index, a
    tag-to-record-ids CSV path, and a user-id-to-row-index mapping.  A
    recommendation retrieves nearest-neighbour books for a user, pads the
    candidates with random books matching the requested tags, then asks an LLM
    to produce a name and description for the resulting book list.
    """

    def __init__(self, data_config_path='./configs/config.json'):
        """
        Initialize the recommendation system.

        :param data_config_path: path to the top-level JSON config.  It maps
            group codes to per-group data-config JSON files
            (``groupCode2json``), group codes to table-name prefixes
            (``groupCode2name``), and holds MySQL settings under
            ``dbConnectConfig``.
        """
        with open(data_config_path, 'r', encoding='utf-8') as file:
            self.configs = json.load(file)
        # Per-group config files are resolved relative to the main config dir.
        self.json_dir = os.path.dirname(data_config_path)

        self.data_config_dict = {}      # group_code -> per-group data config dict
        self.user_embeddings_dict = {}  # group_code -> ndarray of user vectors
        self.item_index_dict = {}       # group_code -> Faiss item index
        self.tag2book_dict = {}         # group_code -> tag->record-ids CSV path
        self.user2index_dict = {}       # group_code -> {user_id(str): row index}
        self._load_data()
        self.cursor = self._connect_sql(data_config_path)

    def _connect_sql(self, db_config_path):
        """Open a MySQL connection from ``dbConnectConfig`` and return a cursor.

        NOTE(review): only the cursor is kept; the connection object stays
        reachable through ``cursor.connection``, so it is not collected.
        """
        with open(db_config_path, 'r', encoding='utf-8') as file:
            db_config = json.load(file)
        conn = pymysql.connect(
            host=db_config["dbConnectConfig"]["host"],
            user=db_config["dbConnectConfig"]["user"],
            passwd=db_config["dbConnectConfig"]["passwd"],
            port=db_config["dbConnectConfig"]["port"],
            db=db_config["dbConnectConfig"]["db"],
            charset=db_config["dbConnectConfig"]["charset"]
        )
        return conn.cursor()

    def _load_data(self):
        """Load embeddings, Faiss index and lookup tables for every group."""
        for group_code, data_config_name in self.configs["groupCode2json"].items():
            config_path = os.path.join(self.json_dir, data_config_name)
            with open(config_path, 'r', encoding='utf-8') as file:
                data_config = json.load(file)
            self.data_config_dict[group_code] = data_config
            self.user_embeddings_dict[group_code] = self._load_user_embeddings(data_config)
            self.item_index_dict[group_code] = self._load_item_index(data_config)
            self.tag2book_dict[group_code] = data_config['tag2recordids_path']

            with open(data_config['user_id_to_index'], 'r', encoding='utf-8') as file:
                self.user2index_dict[group_code] = json.load(file)

    def _load_user_embeddings(self, data_config):
        """Load the precomputed user-embedding matrix for one group."""
        user_embedding_path = os.path.join(data_config['embedding_saved_dir'], "user_embedding.npy")
        return np.load(user_embedding_path)

    def _load_item_index(self, data_config):
        """Load the Faiss item index for one group."""
        item_index_path = os.path.join(data_config['embedding_saved_dir'], "item_embedding.index")
        return faiss.read_index(item_index_path)

    def _get_user_embedding(self, user_id, group_code):
        """Return the (1, dim) embedding row for ``user_id`` in ``group_code``.

        :raises KeyError: if the user id is unknown for this group.
        """
        user_index = self.user2index_dict[group_code][str(user_id)]
        return self.user_embeddings_dict[group_code][user_index].reshape(1, -1)

    def _search_items(self, user_embedding, group_code, k=20):
        """Find the k nearest items to a user embedding and map them to record ids.

        :param user_embedding: (1, dim) float array for a single user.
        :param group_code: institution code; selects the Faiss index and the
            ``{group}_all_item_data`` table.
        :param k: number of nearest items to retrieve.
        :return: list of RECORD_ID values (may be empty).
        """
        _, neighbor_ids = self.item_index_dict[group_code].search(user_embedding, k)
        item_ids = neighbor_ids[0].tolist()
        # Fix: an empty IN () clause is a MySQL syntax error — bail out early.
        if not item_ids:
            return []
        # NOTE(review): the Faiss ids are assumed to equal ITEM_ID values in
        # MySQL (item ids were recorded during training) — confirm if in doubt.
        group_name = self.configs["groupCode2name"][group_code]
        sql_select = f"SELECT RECORD_ID FROM {group_name}_all_item_data WHERE ITEM_ID IN %s"
        # pymysql expands a list parameter into a parenthesized value list.
        self.cursor.execute(sql_select, (item_ids,))
        return [record[0] for record in self.cursor.fetchall()]

    def _add_random_books(self, record_ids, labels, group_code, k=10):
        """Append up to ``k`` random record ids whose tags match ``labels``.

        Streams the tag->record-ids CSV in chunks, collects every record id
        attached to one of the requested tags, removes ids already present in
        ``record_ids``, and samples at most ``k`` of the remainder (fewer when
        not enough candidates exist).

        :return: ``record_ids`` plus the sampled extras (new list).
        """
        tag2book_path = self.tag2book_dict[group_code]
        chunksize = 1000
        candidates = set()
        for chunk in pd.read_csv(tag2book_path, usecols=['tag', 'RECORD_ID'],
                                 chunksize=chunksize):
            matched = chunk[chunk['tag'].isin(labels)]
            for cell in matched['RECORD_ID']:
                # Record ids are '|'-separated; fix: skip NaN/empty cells —
                # the original lambda crashed on NaN (truthy float, no .split).
                if isinstance(cell, str) and cell:
                    candidates.update(int(rid) for rid in cell.split('|'))

        candidates -= set(record_ids)
        pool = list(candidates)
        # Fix: original random.sample raised ValueError when fewer than k
        # candidates were available; clamp instead.
        selected_record_ids = random.sample(pool, min(k, len(pool)))
        return record_ids + selected_record_ids

    def generate_topic_name(self, record_ids, group_code):
        """Ask the LLM for a book-list name and description.

        :param record_ids: RECORD_IDs whose titles seed the prompt.
        :param group_code: institution code; selects the ``{group}_book_attr``
            table.
        :return: ``(topic_name, topic_desc)``; placeholder strings are
            returned when the LLM reply cannot be parsed.
        """
        group_name = self.configs['groupCode2name'][group_code]
        sql_select = f"SELECT TITLE_S FROM {group_name}_book_attr WHERE RECORD_ID IN %s"
        self.cursor.execute(sql_select, (record_ids,))
        book_infos = [record[0] for record in self.cursor.fetchall()]

        prompt = f"根据以下信息，请生成一个吸引人且具有创意的10个字以内的中文'书单名'与50个字以内的'书单简介'。\
                    - 书单包含以下书籍：{book_infos}\
                    请确保书单名反映书单的特点和主题。\
                    "
        start_time = time.time()
        result = get_gpt_ans(prompt)
        print("single big model time:", time.time() - start_time)

        # TODO: the LLM reply is free-form, so this parse is best-effort; the
        # placeholders below are the fallback.  Consider structured output.
        topic_name = "未知书单名"
        topic_desc = "未知书单简介"
        for line in result.split('\n'):
            if '书单名' in line:
                topic_name = line.split('书单名：')[-1].strip()
            elif '简介' in line:
                topic_desc = line.split('简介：')[-1].strip()

        return topic_name, topic_desc

    def generate_book_list(self, user_id, group_code, labels, book_num=20):
        """
        Generate a book-list recommendation.

        :param user_id: user ID.
        :param group_code: institution code.
        :param labels: list of tags the user is interested in.
        :param book_num: number of books to return (all results if fewer).
        :return: dict with ``recordIds``, ``topicName`` and ``topicDesc``.
        """
        user_embedding = self._get_user_embedding(user_id, group_code)
        record_ids = self._search_items(user_embedding, group_code, k=100)
        record_ids = self._add_random_books(record_ids, labels, group_code, k=100)

        # Fix: return all results when fewer than book_num are available —
        # the original random.sample raised ValueError in that case, contrary
        # to the stated intent.
        record_ids = random.sample(record_ids, min(book_num, len(record_ids)))
        topic_name, topic_desc = self.generate_topic_name(record_ids, group_code)
        return {
            'recordIds': record_ids,
            'topicName': topic_name,
            'topicDesc': topic_desc
        }

# Usage example — NOTE: this runs at import time and performs real I/O
# (reads the config files, loads embeddings, opens a MySQL connection).

data_config_path = './configs/config.json'  
# with open(file_path, 'r') as file:  
#     data_config = json.load(file)

recommendation_system = BookRecommendationSystem(data_config_path)
#recommendation = recommendation_system.generate_book_list(user_id=67452, group_code='200572.0', labels=['经济', '历史'])
# recommendation = recommendation_system.generate_book_list(user_id=67452, group_code='200572.0', labels=['经济', '历史'])
# recommendation = recommendation_system.generate_book_list(user_id=7452, group_code='200572.0', labels=['经济', '历史'])

#recommendation = recommendation_system.generate_book_list(user_id=141192, group_code='200157', labels=['经济', '历史'])
# recommendation = recommendation_system.generate_book_list(user_id=12, group_code='000000', labels=['摄影', '历史'])
# print(recommendation)
# record_ids = [2808146, 1850101, 1966858, 1374427, 1400884, 1871218, 1780442, 1457609, 1774923, 1531045]
# recommendation_system.generate_topic_name(record_ids,group_code='200572.0')

#endregion

# start_time = time.time()
# print("start time",start_time)
# for i in range(100):
#     s1 = time.time()
#     recommendation = recommendation_system.generate_book_list(user_id=4546, group_code='200157', labels=['经济', '历史'])
#     print("total time: ",time.time()-s1)
# # for id in [4546,4854,3468,5554,8864,10646]:
# #     for group in ['200572.0','200157','000000']:
# #         recommendation = recommendation_system.generate_book_list(user_id=id, group_code=group, labels=['经济', '历史'])
# #         print(recommendation)

# # print("end time",time.time())
# print("total time",time.time()-start_time)