import numpy as np
import faiss
import os
import json
import csv
import random
import pandas as pd

from utils import get_gpt_ans
# TODO: keep the tag/category tables up to date; "catagory_path":"./data/loan_book/带图书标签的数据/books_categories.csv"
# region
class BookRecommendationSystem:
    """Recommend a personalized book list from precomputed embeddings.

    Retrieval combines a Faiss nearest-neighbour search over item
    embeddings with a random sample of tag-matched books, then asks an
    LLM to produce a title and description for the resulting list.
    """

    def __init__(self, data_config):
        """
        Initialize the recommendation system.

        :param data_config: dict of data paths; must contain
            'embedding_saved_dir' (directory with user/item embedding files),
            'tag2recordids_path' (tag -> '|'-joined record-id CSV) and
            'catagory_path' (book info/category CSV).
        """
        self.data_config = data_config
        self.user_embeddings = self._load_user_embeddings()
        self.item_index = self._load_item_index()

    def _load_user_embeddings(self):
        """Load the (num_users, dim) user embedding matrix from disk."""
        path = os.path.join(self.data_config['embedding_saved_dir'], "user_embedding.npy")
        return np.load(path)

    def _load_item_index(self):
        """Load the prebuilt Faiss index over item embeddings."""
        path = os.path.join(self.data_config['embedding_saved_dir'], "item_embedding.index")
        return faiss.read_index(path)

    def _get_user_embedding(self, user_id):
        """Return the embedding row for ``user_id`` shaped (1, dim) for Faiss."""
        return self.user_embeddings[user_id].reshape(1, -1)

    def _search_items(self, user_embedding, k=20):
        """Return the record ids of the ``k`` nearest items to ``user_embedding``."""
        _, indices = self.item_index.search(user_embedding, k)
        return indices[0].tolist()

    def _add_random_books(self, record_ids, labels, k=10):
        """Append up to ``k`` random books whose tags match ``labels``.

        Books already present in ``record_ids`` are excluded. If fewer than
        ``k`` candidates remain, all remaining candidates are added.

        :param record_ids: ids already selected (not mutated).
        :param labels: iterable of tag names to match.
        :param k: maximum number of random books to append.
        :return: a new list ``record_ids + sampled ids``.
        """
        tag2book_path = self.data_config['tag2recordids_path']
        # Stream the CSV in chunks so a very large tag table fits in memory.
        # Collect matching chunks and concatenate once — repeated pd.concat
        # inside the loop was accidentally quadratic.
        matching_chunks = []
        for chunk in pd.read_csv(tag2book_path, usecols=['tag', 'RECORD_ID'], chunksize=1000):
            matching_chunks.append(chunk[chunk['tag'].isin(labels)])
        if matching_chunks:
            tag_df = pd.concat(matching_chunks)
        else:
            tag_df = pd.DataFrame(columns=['tag', 'RECORD_ID'])

        # 'RECORD_ID' cells hold '|'-separated id strings; missing cells come
        # back from pandas as NaN (a truthy float), so guard with isinstance
        # rather than truthiness.
        available_record_ids = set()
        for cell in tag_df['RECORD_ID']:
            if isinstance(cell, str) and cell:
                available_record_ids.update(int(rid) for rid in cell.split('|'))
        available_record_ids -= set(record_ids)

        # BUG FIX: the sample size was hard-coded to 10 (ignoring ``k``) and
        # random.sample raised ValueError when fewer candidates existed;
        # honour ``k`` and clamp to what is actually available.
        candidates = list(available_record_ids)
        sample_size = min(k, len(candidates))
        selected_record_ids = random.sample(candidates, sample_size)
        return record_ids + selected_record_ids

    def generate_topic_name(self, record_ids):
        """Ask the LLM for a list title and description for ``record_ids``.

        :param record_ids: record ids of the books in the list.
        :return: (topic_name, topic_desc) tuple; falls back to placeholder
            strings when the model reply cannot be parsed.
        """
        # BUG FIX: read the path from the instance config instead of the
        # module-level ``data_config`` global.
        catagory_path = self.data_config['catagory_path']
        df = pd.read_csv(catagory_path)
        book_df = df[df['RECORD_ID'].isin(record_ids)]
        book_infos = list(book_df['book_info'])

        # Union of all '|'-separated tags; skip NaN cells from pandas.
        labels = set()
        for tag_str in book_df['tag']:
            if isinstance(tag_str, str):
                labels.update(tag_str.split('|'))

        prompt = f"根据以下信息，请生成一个吸引人且具有创意的10个字以内的'书单名'与50个字以内的'书单简介'。\
                    - 书单包含以下书籍：{book_infos}\
                    - 主要分类标签：{labels}\
                    请确保书单名反映书单的特点和主题。\
                    "
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            result = get_gpt_ans(prompt)

        # TODO: LLM output parsing is brittle; consider requesting a
        # structured (e.g. JSON) response instead of free text.
        topic_name = "未知书单名"
        topic_desc = "未知书单简介"
        for line in result.split('\n'):
            if '书单名' in line:
                topic_name = line.split('书单名：')[-1].strip()
            elif '简介' in line:
                topic_desc = line.split('简介：')[-1].strip()
        return topic_name, topic_desc

    def generate_book_list(self, user_id, group_code, labels):
        """
        Generate a recommended book list for one user.

        :param user_id: user id (row index into the embedding matrix).
        :param group_code: institution code (currently unused; kept for
            API compatibility).
        :param labels: list of tag names the user is interested in.
        :return: dict with 'recordIds', 'topicName' and 'topicDesc'.
        """
        user_embedding = self._get_user_embedding(user_id)
        record_ids = self._search_items(user_embedding, k=10)
        record_ids = self._add_random_books(record_ids, labels, k=10)
        # Cap at 20 books; fewer are returned when not enough candidates exist.
        record_ids = record_ids[:20]
        topic_name, topic_desc = self.generate_topic_name(record_ids)
        return {
            'recordIds': record_ids,
            'topicName': topic_name,
            'topicDesc': topic_desc
        }

# Usage example. Guarded so that importing this module does not trigger
# config loading, index loading and an LLM call as a side effect.
if __name__ == "__main__":
    file_path = './configs/config.json'
    with open(file_path, 'r', encoding='utf-8') as file:
        data_config = json.load(file)

    recommendation_system = BookRecommendationSystem(data_config)
    recommendation = recommendation_system.generate_book_list(
        user_id=6781, group_code='200572.0', labels=['经济', '历史']
    )
    print(recommendation)
#endregion


# ids = [617760, 1367252, 1637185, 1095713, 653923, 1046684, 661154, 1141304, 664311, 676290, 340512, 189575, 2212912, 394650, 465709, 76623, 137707, 355590, 360242, 471467]
# recommendation_system = BookRecommendationSystem(data_config)
# recommendation_system.generate_topic_name(ids)