import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from transformers import AutoTokenizer, AutoModel
import torch
import faiss  # 高效相似度搜索库


# 1. 用户行为数据处理
class UserBehaviorManager:
    """Stores raw user-item interaction events and derives matrices from them."""

    def __init__(self):
        # One row per interaction event.
        self.user_interactions = pd.DataFrame(columns=['user_id', 'item_id', 'timestamp', 'action'])

    def add_interaction(self, user_id, item_id, action='click'):
        """Record a single user interaction, timestamped at call time.

        Args:
            user_id: identifier of the acting user.
            item_id: identifier of the item acted upon.
            action: interaction type label (default 'click').
        """
        new_interaction = pd.DataFrame({
            'user_id': [user_id],
            'item_id': [item_id],
            'timestamp': [pd.Timestamp.now()],
            'action': [action],
        })
        # ignore_index=True keeps the frame's index unique; the original
        # concat left every appended row with index label 0.
        self.user_interactions = pd.concat(
            [self.user_interactions, new_interaction], ignore_index=True
        )

    def get_user_history(self, user_id):
        """Return the DataFrame of all interactions recorded for *user_id*."""
        return self.user_interactions[self.user_interactions['user_id'] == user_id]

    def build_user_item_matrix(self):
        """Build a binary user-item interaction matrix.

        Returns:
            DataFrame indexed by user_id with one column per item_id; a cell
            is 1 if the user interacted with the item at least once, else 0.
        """
        matrix = self.user_interactions.pivot_table(
            index='user_id', columns='item_id', values='action',
            # Any group that reaches the aggfunc is non-empty, so a constant
            # 1 marks presence; missing pairs become NaN and are zero-filled.
            aggfunc=lambda x: 1,
        ).fillna(0)
        return matrix


# 2. 内容特征提取
class ContentAnalyzer:
    """Extracts and caches content features for items (TF-IDF and BERT)."""

    # Backward-compatible default for the original hard-coded setup.
    # NOTE(review): this is a machine-specific local cache path — pass the
    # hub id (e.g. 'bert-base-uncased') or a portable path in deployment.
    DEFAULT_BERT_PATH = r'C:\Users\pxfpc_014\.cache\huggingface\hub\models--bert-base-uncased\main'

    def __init__(self, bert_path=DEFAULT_BERT_PATH):
        """Load the text featurizers.

        Args:
            bert_path: path or model id handed to the HF Auto* loaders.
        """
        # Bag-of-words featurizer with a capped vocabulary.
        self.text_vectorizer = TfidfVectorizer(max_features=1000)

        # Pretrained tokenizer + encoder for semantic embeddings.
        self.tokenizer = AutoTokenizer.from_pretrained(bert_path)
        self.text_model = AutoModel.from_pretrained(bert_path)

        # item_id -> 1-D feature vector, filled by analyze_item().
        self.item_features = {}

    def extract_text_features(self, text):
        """TF-IDF features.

        A list of documents (re)fits the vectorizer; a single string only
        transforms, and therefore requires a prior fitting call.
        """
        if isinstance(text, list):
            return self.text_vectorizer.fit_transform(text)
        return self.text_vectorizer.transform([text])

    def extract_bert_features(self, text):
        """Return the BERT [CLS] embedding of *text* as a (1, hidden) ndarray."""
        inputs = self.tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
        with torch.no_grad():
            outputs = self.text_model(**inputs)
        # The [CLS] token embedding serves as the whole-text representation.
        return outputs.last_hidden_state[:, 0, :].numpy()

    def analyze_item(self, item_id, item_content):
        """Compute and cache the (flattened) feature vector for one item."""
        text_features = self.extract_bert_features(item_content)
        self.item_features[item_id] = text_features.flatten()
        return text_features

    def build_faiss_index(self):
        """Build a FAISS L2 index over all cached item features.

        Returns:
            (index, item_ids) on success, or None when no features exist.
            NOTE(review): the two return shapes differ; callers must check
            for None before unpacking.
        """
        if not self.item_features:
            return None

        item_ids = list(self.item_features.keys())
        # FAISS indexes only accept float32 input; BERT output already is,
        # but cast defensively so other feature sources also work.
        features = np.asarray(
            [self.item_features[item_id] for item_id in item_ids], dtype='float32'
        )

        index = faiss.IndexFlatL2(features.shape[1])
        index.add(features)

        return index, item_ids


# 3. 推荐引擎
class RecommendationEngine:
    """Combines behavior tracking and content analysis into recommendations."""

    def __init__(self):
        self.user_behavior = UserBehaviorManager()
        self.content_analyzer = ContentAnalyzer()

    def add_item(self, item_id, item_content):
        """Register an item and extract its content features."""
        self.content_analyzer.analyze_item(item_id, item_content)

    def record_user_action(self, user_id, item_id, action='click'):
        """Record one user interaction."""
        self.user_behavior.add_interaction(user_id, item_id, action)

    def recommend_by_content(self, user_id, top_k=5):
        """Content-based recommendations.

        Ranks items by cosine similarity to the user's most recently
        interacted item, skipping items already seen.

        Returns:
            Up to top_k item ids; empty list for unknown users or when the
            reference item has no stored features (cold-start cases).
        """
        user_history = self.user_behavior.get_user_history(user_id)
        if user_history.empty:
            return []  # no history: caller should fall back to cold start

        recent_item = user_history.sort_values('timestamp', ascending=False).iloc[0]['item_id']

        # Guard: the interaction log may reference items that were never
        # analyzed; the original raised KeyError here.
        if recent_item not in self.content_analyzer.item_features:
            return []

        item_ids = list(self.content_analyzer.item_features.keys())
        item_features = np.array([self.content_analyzer.item_features[i] for i in item_ids])

        target_feature = self.content_analyzer.item_features[recent_item].reshape(1, -1)
        similarities = cosine_similarity(target_feature, item_features)[0]

        seen = set(user_history['item_id'].values)
        recommended_items = []
        # Walk the FULL similarity ranking: the original sliced off only
        # top_k + 1 candidates and could under-fill the result once the
        # user's already-seen items were filtered out.
        for idx in similarities.argsort()[::-1]:
            candidate = item_ids[idx]
            if candidate != recent_item and candidate not in seen:
                recommended_items.append(candidate)
            if len(recommended_items) >= top_k:
                break

        return recommended_items

    def recommend_by_collaborative_filtering(self, user_id, top_k=5):
        """User-based collaborative filtering over the binary interaction matrix.

        Returns:
            Up to top_k item ids liked by similar users but not yet seen by
            *user_id*; empty list when the user has no recorded interactions.
        """
        user_item_matrix = self.user_behavior.build_user_item_matrix()

        if user_id not in user_item_matrix.index:
            return []  # unknown user: cold-start fallback

        user_similarity = cosine_similarity(user_item_matrix)
        user_idx = user_item_matrix.index.get_loc(user_id)

        # Ten nearest neighbours, skipping the user themselves (rank 0).
        similar_users = user_similarity[user_idx].argsort()[::-1][1:11]

        recommended_items = []
        user_items = set(user_item_matrix.columns[user_item_matrix.iloc[user_idx] > 0])

        for similar_user_idx in similar_users:
            similar_user_items = set(
                user_item_matrix.columns[user_item_matrix.iloc[similar_user_idx] > 0]
            )
            # Only surface items the target user has not interacted with.
            for item in similar_user_items - user_items:
                if item not in recommended_items:
                    recommended_items.append(item)
                if len(recommended_items) >= top_k:
                    return recommended_items

        return recommended_items

    def hybrid_recommendation(self, user_id, top_k=5, content_weight=0.5):
        """Blend content-based and collaborative-filtering recommendations.

        Each list contributes a rank-based score (best rank = top_k points),
        weighted by content_weight vs. (1 - content_weight).
        """
        content_recs = self.recommend_by_content(user_id, top_k)
        cf_recs = self.recommend_by_collaborative_filtering(user_id, top_k)

        content_scores = {item: (top_k - i) * content_weight for i, item in enumerate(content_recs)}
        cf_scores = {item: (top_k - i) * (1 - content_weight) for i, item in enumerate(cf_recs)}

        # Union the two candidate sets and sum their weighted scores.
        combined_scores = {
            item: content_scores.get(item, 0) + cf_scores.get(item, 0)
            for item in set(content_recs + cf_recs)
        }

        sorted_items = sorted(combined_scores.items(), key=lambda kv: kv[1], reverse=True)
        return [item for item, _score in sorted_items[:top_k]]


# 4. 冷启动策略
def cold_start_recommendations(item_catalog, user_preferences=None, top_k=5):
    """Fallback recommendations for new users with no interaction history.

    Args:
        item_catalog: iterable of item dicts with 'category' and 'popularity' keys.
        user_preferences: optional collection of preferred category names
            (e.g. gathered at sign-up); falsy means "no preference".
        top_k: maximum number of items to return (default 5).

    Returns:
        Up to top_k items, most popular first. When preferences are given,
        only items whose category matches is considered. (The original
        preference branch returned ALL matches unsorted, inconsistent with
        the capped, popularity-sorted no-preference branch.)
    """
    if user_preferences:
        # Restrict to the user's declared categories.
        candidates = [item for item in item_catalog if item['category'] in user_preferences]
    else:
        candidates = list(item_catalog)

    # Rank both branches the same way: popularity descending, capped at top_k.
    return sorted(candidates, key=lambda x: x['popularity'], reverse=True)[:top_k]


# 示例使用
if __name__ == "__main__":
    # Demo run: populate a small catalog, simulate clicks, print recommendations.
    engine = RecommendationEngine()

    # (item_id, content) pairs for the demo catalog.
    demo_catalog = [
        ("item1", "这是一篇关于人工智能在医疗领域应用的文章"),
        ("item2", "最新的深度学习模型进展与趋势分析"),
        ("item3", "如何构建高效的推荐系统架构"),
        ("item4", "计算机视觉技术在自动驾驶中的应用"),
        ("item5", "自然语言处理的最新研究成果"),
        ("item6", "机器学习模型的部署与优化实践"),
    ]
    for item_id, content in demo_catalog:
        engine.add_item(item_id, content)

    # Simulated click history for two users.
    for user, item in [("user1", "item1"), ("user1", "item2"),
                       ("user2", "item3"), ("user2", "item6")]:
        engine.record_user_action(user, item)

    # Generate hybrid recommendations for user1.
    recommendations = engine.hybrid_recommendation("user1")
    print(f"为用户1推荐的内容: {recommendations}")