# ContentKNNAlgorithm.py
from functools import lru_cache

from surprise import AlgoBase
from surprise import PredictionImpossible
from MovieLens import MovieLens
import math
import numpy as np
import heapq
import os
import joblib
from pathlib import Path

from BookCrossingLoader import BookCrossingLoader

# Text-processing helper class (TF-IDF vectorization of book summaries)
from sklearn.feature_extraction.text import TfidfVectorizer


class TextProcessor:
    """Thin wrapper around scikit-learn's TF-IDF vectorizer for summary text."""

    def __init__(self):
        # English stop words are dropped; vocabulary is capped at 1000 terms.
        self.vectorizer = TfidfVectorizer(stop_words='english', max_features=1000)

    def fit_transform(self, texts: list) -> np.ndarray:
        """Fit the vectorizer on *texts* and return dense TF-IDF vectors."""
        sparse_matrix = self.vectorizer.fit_transform(texts)
        return sparse_matrix.toarray()

    def transform(self, texts: list) -> np.ndarray:
        """Vectorize *texts* using the already-fitted vocabulary."""
        sparse_matrix = self.vectorizer.transform(texts)
        return sparse_matrix.toarray()


class ContentKNNAlgorithm(AlgoBase):
    """
    Content-based KNN recommender.

    Item-item similarity is the product of three content signals —
    genre (cosine), publication year (exponential decay) and summary
    text (TF-IDF cosine). Pairwise scores are persisted to disk with
    joblib so later fits can reuse them.
    """

    def __init__(self, k: int = 40, sim_options: dict = None, cache_path: str = 'similarity_cache.joblib'):
        """
        Initialize the algorithm.

        Args:
            k (int): number of nearest neighbors, default 40.
            sim_options (dict): similarity options (currently unused; kept
                for interface compatibility). Defaults to None instead of a
                mutable ``{}`` to avoid the shared-default-argument pitfall.
            cache_path (str): path of the joblib file holding the similarity
                cache, default 'similarity_cache.joblib'.
        """
        AlgoBase.__init__(self)
        self.k = k
        self.sim_options = {} if sim_options is None else sim_options
        self.cache_path = cache_path
        # Reload any cache persisted by a previous run; start empty otherwise.
        self.similarity_cache = self.load_cache() if os.path.exists(self.cache_path) else {}

    def save_cache(self) -> None:
        """Persist the similarity cache to ``self.cache_path`` (best effort)."""
        try:
            joblib.dump(self.similarity_cache, self.cache_path)
            print(f"Similarity cache saved to {self.cache_path}")
        except Exception as e:
            # Best-effort persistence: a failed save must not abort training.
            print(f"Failed to save cache: {e}")

    def load_cache(self) -> dict:
        """
        Load the similarity cache from ``self.cache_path``.

        Returns:
            dict: cached {(bookID, bookID): similarity} mapping, or an empty
            dict when the file cannot be read.
        """
        try:
            cache = joblib.load(self.cache_path)
            print(f"Similarity cache loaded from {self.cache_path}")
            return cache
        except Exception as e:
            print(f"Failed to load cache: {e}")
            return {}

    def compute_similarity(self, thisBookID: int, otherBookID: int, genres: dict, years: dict, summers: dict) -> float:
        """
        Compute (or fetch from cache) the combined similarity of two books.

        The score is the product of genre, year and summary similarity.
        Each component is symmetric, so a cache entry stored under either
        key order is accepted (keeps old cache files usable).
        """
        key = (thisBookID, otherBookID)
        if key in self.similarity_cache:
            return self.similarity_cache[key]
        reversed_key = (otherBookID, thisBookID)
        if reversed_key in self.similarity_cache:
            return self.similarity_cache[reversed_key]

        genreSimilarity = self.computeGenreSimilarity(thisBookID, otherBookID, genres)
        yearSimilarity = self.computeYearSimilarity(thisBookID, otherBookID, years)
        summerySimilarity = self.computeSummerySimilarity(thisBookID, otherBookID, summers, self.text_processor)
        similarity = genreSimilarity * yearSimilarity * summerySimilarity

        self.similarity_cache[key] = similarity
        return similarity

    def fit(self, trainset) -> 'ContentKNNAlgorithm':
        """
        Train the model: build the full item-item similarity matrix.

        Args:
            trainset: surprise training set.

        Returns:
            self: the fitted instance.
        """
        AlgoBase.fit(self, trainset)

        # Load the book metadata needed for content similarity:
        # category vector, publication year and summary text per book.
        ml = BookCrossingLoader('../ml-latest-small/book_crossing.csv')
        genres = ml.getCategory()
        years = ml.getYears()
        summers = ml.getSummery()

        print("Computing content-based similarity matrix...")

        # Fit the TF-IDF model on every summary up front so that individual
        # pairs can be vectorized consistently later.
        self.text_processor = TextProcessor()
        self.text_processor.fit_transform(list(summers.values()))

        # Dense (n_items x n_items) matrix of combined similarity scores,
        # indexed by the trainset's inner item ids (0..n_items-1).
        self.similarities = np.zeros((self.trainset.n_items, self.trainset.n_items))

        for thisRating in range(self.trainset.n_items):
            if thisRating % 100 == 0:
                print(thisRating, " of ", self.trainset.n_items)
            # Only the upper triangle is computed; symmetry fills the rest.
            for otherRating in range(thisRating + 1, self.trainset.n_items):
                # Map inner indices back to raw book ids for metadata lookup.
                thisBookID = int(self.trainset.to_raw_iid(thisRating))
                otherBookID = int(self.trainset.to_raw_iid(otherRating))

                similarity = self.compute_similarity(thisBookID, otherBookID, genres, years, summers)
                self.similarities[thisRating, otherRating] = similarity
                self.similarities[otherRating, thisRating] = similarity

        # Persist the (possibly updated) cache for future runs.
        self.save_cache()
        print("...done.")
        return self

    def computeGenreSimilarity(self, movie1: int, movie2: int, genres: dict) -> float:
        """
        Cosine similarity between the genre bit-vectors of two books.

        Genres are discrete bit-vectors (e.g. [1, 0, 0, 1]), for which
        cosine similarity is a natural fit.

        Args:
            movie1 (int): first book's raw ID.
            movie2 (int): second book's raw ID.
            genres (dict): {bookID: genre bit-vector}.

        Returns:
            float: cosine similarity; 0.0 when either vector is all zeros.
        """
        genres1 = genres[movie1]
        genres2 = genres[movie2]
        sumxx = sumyy = sumxy = 0
        for x, y in zip(genres1, genres2):
            sumxx += x * x  # squared norm of vector 1
            sumyy += y * y  # squared norm of vector 2
            sumxy += x * y  # dot product
        # Guard against zero vectors to avoid division by zero.
        return sumxy / math.sqrt(sumxx * sumyy) if sumxx != 0 and sumyy != 0 else 0.0

    def computeYearSimilarity(self, movie1: int, movie2: int, years: dict) -> float:
        """
        Publication-year similarity: exp(-|y1 - y2| / 10).

        Args:
            movie1 (int): first book's raw ID.
            movie2 (int): second book's raw ID.
            years (dict): {bookID: publication year}.

        Returns:
            float: similarity in (0, 1]; 1.0 for identical years.
        """
        diff = abs(years[movie1] - years[movie2])
        return math.exp(-diff / 10.0)

    def computeSummerySimilarity(self, book1: int, book2: int, summaries: dict, text_processor: TextProcessor) -> float:
        """
        Cosine similarity between the TF-IDF vectors of two book summaries.

        Args:
            book1 (int): first book's ID.
            book2 (int): second book's ID.
            summaries (dict): {bookID: summary text}.
            text_processor: pre-fitted TextProcessor.

        Returns:
            float: similarity in [0, 1]; 0.0 when either vector is zero.
        """
        # Missing summaries fall back to the empty string (zero vector).
        text1 = summaries.get(book1, "")
        text2 = summaries.get(book2, "")

        # Vectorize both texts with the already-fitted TF-IDF vocabulary.
        vectors = text_processor.transform([text1, text2])

        dot_product = np.dot(vectors[0], vectors[1])
        norm_product = np.linalg.norm(vectors[0]) * np.linalg.norm(vectors[1])
        return dot_product / norm_product if norm_product != 0 else 0.0

    def estimate(self, u: int, i: int) -> float:
        """
        Predict the rating of inner user u for inner item i.

        Args:
            u (int): inner user id.
            i (int): inner item id.

        Returns:
            float: similarity-weighted average of the user's ratings on the
            k most similar items.

        Raises:
            PredictionImpossible: if the user/item is unknown, or no
            neighbor has positive similarity.
        """
        if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):
            raise PredictionImpossible('User and/or item is unknown.')

        # Pair each item the user rated with its similarity to item i.
        neighbors = []
        for rated_item, rating_value in self.trainset.ur[u]:
            neighbors.append((self.similarities[i, rated_item], rating_value))

        # Keep the k most similar rated items.
        k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[0])

        # Similarity-weighted average over neighbors with positive similarity.
        simTotal = weightedSum = 0
        for simScore, rating in k_neighbors:
            if simScore > 0:
                simTotal += simScore
                weightedSum += simScore * rating

        if simTotal == 0:
            raise PredictionImpossible('No neighbors')

        return weightedSum / simTotal
