from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np

# Sample book catalogue. Each record carries the metadata fields
# (category + author) used for content-based matching.
books = [
    {"title": "天龙八部", "category": "武侠", "author": "金庸"},
    {"title": "笑傲江湖", "category": "武侠", "author": "金庸"},
    {"title": "射雕英雄传", "category": "武侠", "author": "金庸"},
    {"title": "红楼梦", "category": "古典", "author": "曹雪芹"},
]

# Merge category and author into a single "document" per book so that
# the combined metadata can be TF-IDF vectorized.
books_text = [f"{book['category']} {book['author']}" for book in books]

# Fit the TF-IDF model on the catalogue; X is a sparse
# (n_books, n_terms) matrix of term weights.
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(books_text)

# Titles the user has previously borrowed.
user_history = ["天龙八部", "笑傲江湖"]

# Indices (into `books` / rows of X) of the borrowed titles.
user_books_indices = [i for i, book in enumerate(books) if book['title'] in user_history]

# User interest profile: the mean of the borrowed books' TF-IDF vectors.
# Sparse .mean(axis=0) returns an np.matrix; np.asarray converts it to a
# plain ndarray so reshape/cosine_similarity behave predictably.
user_profile = np.asarray(X[user_books_indices].mean(axis=0))

# Similarity of the user profile to every book; result shape is (1, n_books).
similarities = cosine_similarity(user_profile.reshape(1, -1), X)

# Minimum similarity a book must reach to be recommended.
threshold = 0.30

# Rank all books by descending similarity, then drop books below the
# threshold and books the user has already borrowed.
sorted_indices = similarities[0].argsort()[::-1]
filtered_indices = [i for i in sorted_indices if similarities[0][i] >= threshold and i not in user_books_indices]

# Recommend at most the top 3 remaining books.
recommended_books = [books[i] for i in filtered_indices[:3]]

for book in recommended_books:
    print(f"推荐图书: {book['title']}, 分类: {book['category']}, 作者: {book['author']}")
