import os
import csv
import logging
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
import traceback

# Path of the global CSV file holding user/article behavior records
# (columns: user_id, article_id, favorite_count).
DATA_FILE = "user_article_behavior.csv"

# Logging configuration: INFO level, mirrored to the console and to
# 'recommendation.log'.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler('recommendation.log')
    ]
)

# Medical article taxonomy: category name -> substring keywords that
# get_article_category() searches for in an article's title/content/tags.
# Keys and keywords are runtime data (Chinese) and must stay as-is.
# NOTE(review): '呼吸' appears under both '内科' and '呼吸系统'; when scores
# tie, classification resolves to the first-declared category.
MEDICAL_CATEGORIES = {
    '内科': ['内科', '呼吸', '消化', '心血管', '内分泌'],
    '外科': ['外科', '骨科', '神经外科'],
    '妇产科': ['女性', '妇科', '产科', '月经', '孕期'],
    '儿科': ['儿科', '小儿', '婴幼儿'],
    '老年医学': ['老年', '老人'],
    '慢性病': ['慢性病', '糖尿病', '高血压', '高血脂'],
    '呼吸系统': ['呼吸', '肺', '哮喘', 'COPD'],
    '消化系统': ['消化', '胃肠', '肝', '胆', '胰腺'],
    '营养学': ['营养', '饮食', '膳食'],
    '神经系统': ['神经', '脑', '认知', '记忆']
}

def collect_data(user_id: int, article_id: int, favorite_count: int) -> str:
    """
    Tool 1: data collection — append one behavior record to the CSV file.

    Records the user ID, article ID and favorite/view count in DATA_FILE,
    writing the header row first when the file does not exist yet.

    Args:
        user_id: identifier of the user.
        article_id: identifier of the medical article.
        favorite_count: number of times the user favorited/viewed the article.

    Returns:
        A human-readable status message (success or error description).
    """
    try:
        # Open the file exactly once in append mode and write the header only
        # for a brand-new file. The original implementation checked existence,
        # created the file with a header, then reopened it — leaving a window
        # in which a concurrent writer could duplicate or lose the header.
        write_header = not os.path.exists(DATA_FILE)
        with open(DATA_FILE, 'a', newline='', encoding='utf-8') as file:
            writer = csv.writer(file)
            if write_header:
                writer.writerow(['user_id', 'article_id', 'favorite_count'])
            writer.writerow([user_id, article_id, favorite_count])

        # Lazy %-style arguments: the message is only formatted if emitted.
        logging.info("数据收集成功: 用户%s, 医疗文章%s, 收藏/浏览%s次", user_id, article_id, favorite_count)

        return f"成功收集数据：用户{user_id}对医疗文章{article_id}的行为，次数{favorite_count}"

    except Exception as e:
        error_msg = f"数据收集失败: {str(e)}"
        logging.error(error_msg)
        return error_msg

def get_article_category(article_title, article_content=None, article_tags=None):
    """
    Determine the medical category of an article from title, content and tags.

    Each MEDICAL_CATEGORIES entry is scored by how many of its keywords occur
    (as substrings) in the combined text; the highest-scoring category wins.
    Ties resolve to the first-declared category (dict insertion order), which
    matches max() over the score dict.

    Args:
        article_title: article title (any object convertible with str()).
        article_content: optional article body text.
        article_tags: optional tags — a list of tags or a single value.

    Returns:
        The best-matching category name, or "未分类" (uncategorized) when no
        input is given or no keyword matches.
    """
    if not article_title and not article_content and not article_tags:
        return "未分类"

    # Merge every available piece of text into one searchable string.
    parts = []
    if article_title:
        parts.append(str(article_title))
    if article_content:
        parts.append(str(article_content))
    if article_tags:
        if isinstance(article_tags, list):
            parts.append(" ".join(str(tag) for tag in article_tags))
        else:
            parts.append(str(article_tags))
    text = " ".join(parts)

    # Count keyword hits per category; keep only categories with >= 1 hit.
    category_scores = {}
    for category, keywords in MEDICAL_CATEGORIES.items():
        score = sum(1 for keyword in keywords if keyword in text)
        if score:
            category_scores[category] = score

    # Highest score wins; no matches at all means "uncategorized".
    if category_scores:
        return max(category_scores.items(), key=lambda item: item[1])[0]
    return "未分类"

def analyze_similarity() -> str:
    """
    Tool 2: data analysis — compute user-user and article-article similarity.

    Reads the behavior CSV, builds user/article interaction matrices, computes
    cosine similarities, blends the article behavior similarity with a
    category-based similarity (70% behavior / 30% category), and persists the
    matrices as .npy files for recommend_articles().

    Returns:
        A human-readable summary of the analysis, or an error description.
    """
    try:
        # Data must have been collected first.
        if not os.path.exists(DATA_FILE):
            return "数据文件不存在，请先收集数据"

        data = pd.read_csv(DATA_FILE)

        if len(data) == 0:
            return "数据文件为空，请先收集数据"

        print(f"读取到 {len(data)} 条数据")

        # Validate the schema before touching any column.
        required_columns = ['user_id', 'article_id', 'favorite_count']
        missing_columns = [col for col in required_columns if col not in data.columns]
        if missing_columns:
            return f"数据文件缺少必要的列: {missing_columns}"

        # Coerce every column to numeric and drop anything unparsable.
        data = data.dropna(subset=required_columns)
        data['user_id'] = pd.to_numeric(data['user_id'], errors='coerce')
        data['article_id'] = pd.to_numeric(data['article_id'], errors='coerce')
        data['favorite_count'] = pd.to_numeric(data['favorite_count'], errors='coerce')
        data = data.dropna()

        if len(data) == 0:
            return "处理后数据为空，请检查数据格式"

        print(f"有效数据：{len(data)}行")

        # 1. User similarity: rows = users, columns = articles, values = summed counts.
        print("开始计算用户相似度...")
        user_article_matrix = data.pivot_table(
            index='user_id',
            columns='article_id',
            values='favorite_count',
            fill_value=0,
            aggfunc='sum'
        )

        user_similarity_matrix = cosine_similarity(user_article_matrix)
        print(f"用户相似度矩阵计算完成: {user_similarity_matrix.shape}")

        # 2. Article similarity based on which users interacted with them.
        print("开始计算文章相似度...")
        article_user_matrix = data.pivot_table(
            index='article_id',
            columns='user_id',
            values='favorite_count',
            fill_value=0,
            aggfunc='sum'
        )

        behavior_similarity = cosine_similarity(article_user_matrix)
        print("基于用户行为的相似度计算完成")

        # Try to fetch article metadata from the project's database model;
        # fall back to behavior-only similarity when unavailable.
        try:
            from background.models import MedicalArticle
            articles = {article.id: {
                'title': article.title,
                'content': article.content,
                'tags': article.tags
            } for article in MedicalArticle.objects.all()}
        except Exception:  # was a bare except: must not swallow SystemExit/KeyboardInterrupt
            articles = {}
            print("无法从数据库获取文章信息，将使用基于用户行为的推荐")

        # Assign a medical category to every article present in the matrix.
        article_categories = {}
        for raw_id in article_user_matrix.index:
            article_id = int(raw_id)
            if article_id in articles:
                meta = articles[article_id]
                category = get_article_category(
                    meta.get('title', ''), meta.get('content', ''), meta.get('tags', '')
                )
            else:
                category = "未分类"  # no metadata available -> uncategorized
            article_categories[article_id] = category

        print(f"医疗文章分类完成，共有 {len(article_categories)} 篇文章")

        # Category affinity: 1.0 same known category, 0.5 two different known
        # categories, 0.3 when at least one side is uncategorized. Hoist the
        # per-article category lookup out of the O(n^2) pair loop.
        ordered_categories = [
            article_categories.get(int(article_id), "未分类")
            for article_id in article_user_matrix.index
        ]

        def _pair_score(cat1, cat2):
            # Pairwise category affinity used to fill the matrix below.
            if cat1 == cat2 and cat1 != "未分类":
                return 1.0
            if cat1 != "未分类" and cat2 != "未分类":
                return 0.5
            return 0.3

        category_similarity = np.array([
            [_pair_score(cat1, cat2) for cat2 in ordered_categories]
            for cat1 in ordered_categories
        ])
        print("基于医疗类别的相似度计算完成")

        # Blend: 70% behavior similarity + 30% category similarity.
        article_similarity_matrix = behavior_similarity * 0.7 + category_similarity * 0.3
        print("综合文章相似度矩阵计算完成")

        # Persist everything recommend_articles() needs.
        np.save('user_similarity.npy', user_similarity_matrix)
        np.save('article_similarity.npy', article_similarity_matrix)
        np.save('user_indices.npy', user_article_matrix.index.values)
        np.save('article_indices.npy', article_user_matrix.index.values)
        print("相似度矩阵保存成功")

        # Article count per medical category (plain dict so str(stats) output
        # keeps its original form).
        category_counts = {}
        for article_id, category in article_categories.items():
            category_counts[category] = category_counts.get(category, 0) + 1

        # Summary statistics returned to the caller.
        stats = {
            "数据总量": len(data),
            "用户数量": data['user_id'].nunique(),
            "文章数量": data['article_id'].nunique(),
            "医疗类别分布": category_counts,
            "浏览次数统计": {
                "平均浏览次数": round(data['favorite_count'].mean(), 2),
                "最少浏览次数": int(data['favorite_count'].min()),
                "最多浏览次数": int(data['favorite_count'].max()),
                "浏览次数标准差": round(data['favorite_count'].std(), 2)
            },
            "相似度统计": {
                "用户平均相似度": round(np.mean(user_similarity_matrix), 3),
                "文章平均相似度": round(np.mean(article_similarity_matrix), 3),
                "用户相似度矩阵维度": f"{user_similarity_matrix.shape[0]}x{user_similarity_matrix.shape[1]}",
                "文章相似度矩阵维度": f"{article_similarity_matrix.shape[0]}x{article_similarity_matrix.shape[1]}"
            }
        }

        print("相似度分析完成")
        return f"相似度分析完成：\n{str(stats)}"
    except Exception as e:
        error_msg = f"相似度分析失败: {str(e)}"
        print(error_msg)
        traceback.print_exc()
        return error_msg

def recommend_articles(user_id: int, top_k: int = 5) -> str:
    """
    Tool 3: recommend medical articles for the given user.

    Combines four candidate sources — user-based collaborative filtering,
    content (article-similarity) filtering, preferred-category matching and
    global article importance — then scores, ranks and formats the top_k
    recommendations. Requires analyze_similarity() to have been run first so
    the .npy similarity matrices exist on disk.

    Args:
        user_id: the user to recommend for (must appear in the collected data).
        top_k: maximum number of articles to return (default 5).

    Returns:
        A formatted recommendation report, or an error description.
    """
    try:
        # The similarity matrices are produced by analyze_similarity().
        required_files = ['user_similarity.npy', 'article_similarity.npy', 'user_indices.npy', 'article_indices.npy']
        missing_files = [f for f in required_files if not os.path.exists(f)]

        if missing_files:
            error_msg = f"❌ 缺少相似度矩阵文件：{missing_files}，请先进行数据分析"
            print(error_msg)
            return error_msg

        if not os.path.exists(DATA_FILE):
            error_msg = f"❌ 数据文件 {DATA_FILE} 不存在"
            print(error_msg)
            return error_msg

        # Load the behavior data and the precomputed matrices.
        try:
            data = pd.read_csv(DATA_FILE)
            user_similarity_matrix = np.load('user_similarity.npy')
            article_similarity_matrix = np.load('article_similarity.npy')
            user_indices = np.load('user_indices.npy')
            article_indices = np.load('article_indices.npy')

        except Exception as e:
            error_msg = f"❌ 加载文件失败: {str(e)}"
            print(error_msg)
            return error_msg

        # Same cleaning as analyze_similarity() so ids line up with the
        # saved matrices. NOTE(review): favorite_count is intentionally left
        # as pandas parsed it here (only the id columns are coerced).
        data = data.dropna(subset=['user_id', 'article_id', 'favorite_count'])
        data['user_id'] = pd.to_numeric(data['user_id'], errors='coerce')
        data['article_id'] = pd.to_numeric(data['article_id'], errors='coerce')
        data = data.dropna()

        # The user must exist in the saved index (`in` on a numpy array
        # is an element-wise membership test).
        user_id = int(user_id)
        if user_id not in user_indices:
            available_users = list(user_indices)
            error_msg = f"❌ 用户{user_id}不存在于数据中。可用用户ID：{available_users}"
            print(error_msg)
            return error_msg

        # Row position of this user in the similarity matrix.
        user_idx = np.where(user_indices == user_id)[0][0]

        # Articles the user has already seen (excluded from the final list).
        user_articles = set(data[data['user_id'] == user_id]['article_id'].values)

        # Every article present in the behavior data.
        all_articles = set(data['article_id'].unique())

        # Optional article metadata from the project's database model.
        try:
            from background.models import MedicalArticle
            articles = {article.id: {
                'title': article.title,
                'content': article.content,
                'tags': article.tags,
                'category': get_article_category(article.title, article.content, article.tags)
            } for article in MedicalArticle.objects.all()}
        except Exception:  # was a bare except: must not swallow SystemExit/KeyboardInterrupt
            articles = {}
            print("无法从数据库获取文章信息，将使用基于用户行为的推荐")

        # 1. User-based collaborative filtering.
        user_similarities = user_similarity_matrix[user_idx]

        # Low threshold on purpose so sparse data still yields neighbors.
        similarity_threshold = 0.1
        similar_users_mask = user_similarities > similarity_threshold
        similar_users_idx = np.where(similar_users_mask)[0]

        # Exclude the target user themselves.
        similar_users_idx = similar_users_idx[similar_users_idx != user_idx]

        collaborative_recommendations = set()
        if len(similar_users_idx) > 0:
            # Take the 10 most similar users, most similar first.
            sorted_indices = similar_users_idx[np.argsort(user_similarities[similar_users_idx])[::-1][:10]]

            for similar_user_idx in sorted_indices:
                similar_user_id = user_indices[similar_user_idx]

                # Everything a similar user viewed becomes a candidate.
                similar_user_articles = data[data['user_id'] == similar_user_id]['article_id'].values
                collaborative_recommendations.update(similar_user_articles)

        # 2. Content-based filtering: articles similar to what the user read.
        content_recommendations = set()

        if len(user_articles) > 0:
            for article_id in user_articles:
                if article_id in article_indices:
                    article_idx = np.where(article_indices == article_id)[0][0]
                    article_similarities = article_similarity_matrix[article_idx]

                    # Same low threshold as the user branch.
                    similarity_threshold = 0.1
                    similar_articles_mask = article_similarities > similarity_threshold
                    similar_articles_idx = np.where(similar_articles_mask)[0]

                    # Exclude the seed article itself.
                    similar_articles_idx = similar_articles_idx[similar_articles_idx != article_idx]

                    # Keep the 5 most similar articles per seed article.
                    if len(similar_articles_idx) > 0:
                        sorted_article_indices = similar_articles_idx[
                            np.argsort(article_similarities[similar_articles_idx])[::-1][:5]]

                        for similar_article_idx in sorted_article_indices:
                            similar_article_id = article_indices[similar_article_idx]
                            content_recommendations.add(similar_article_id)

        # 3. Same-category recommendations based on the user's history.
        category_recommendations = set()

        # Only possible when both a history and article metadata exist.
        if len(user_articles) > 0 and articles:
            user_category_counts = {}
            for article_id in user_articles:
                if int(article_id) in articles:
                    category = articles[int(article_id)].get('category', '未分类')
                    user_category_counts[category] = user_category_counts.get(category, 0) + 1

            # The user's top two real (non-"未分类") categories.
            if user_category_counts:
                preferred_categories = sorted(user_category_counts.items(), key=lambda x: x[1], reverse=True)
                preferred_categories = [cat for cat, count in preferred_categories if cat != '未分类'][:2]

                # Every article in a preferred category becomes a candidate.
                for article_id in all_articles:
                    if int(article_id) in articles:
                        article_category = articles[int(article_id)].get('category', '未分类')
                        if article_category in preferred_categories:
                            category_recommendations.add(article_id)

        # 4. Globally important articles — total favorite/view count is used
        # as a proxy for importance; keep the top 20.
        article_importance = data.groupby('article_id')['favorite_count'].sum().sort_values(ascending=False)
        important_articles = set(article_importance.head(20).index)

        # 5. Union of all candidate sources, minus what the user already read.
        all_recommendations = collaborative_recommendations | content_recommendations | category_recommendations | important_articles
        final_recommendations = all_recommendations - user_articles

        if not final_recommendations:
            # Fallback: random unseen articles.
            all_other_articles = all_articles - user_articles

            if len(all_other_articles) == 0:
                return f"🎉 恭喜！用户{user_id}已经浏览了系统中的所有医疗文章！"

            import random
            backup_recommendations = list(all_other_articles)
            random.shuffle(backup_recommendations)
            final_recommendations = set(backup_recommendations[:min(top_k, len(backup_recommendations))])

        # 6. Score each candidate by which sources proposed it plus popularity.
        recommendation_scores = {}
        for article_id in final_recommendations:
            score = 0

            if article_id in collaborative_recommendations:
                score += 0.6  # collaborative-filtering weight

            if article_id in content_recommendations:
                score += 0.4  # content-similarity weight

            if article_id in category_recommendations:
                score += 0.5  # category-match weight

            if article_id in important_articles:
                score += 0.3  # importance weight

            # Popularity: number of behavior rows mentioning the article.
            article_popularity_score = len(data[data['article_id'] == article_id]) * 0.1
            score += article_popularity_score

            # Average favorite/view count per row for the article.
            avg_favorite = data[data['article_id'] == article_id]['favorite_count'].mean()
            avg_score = avg_favorite * 0.1
            score += avg_score

            recommendation_scores[article_id] = score

        # Highest score first, truncated to top_k.
        top_recommendations = sorted(recommendation_scores.items(), key=lambda x: x[1], reverse=True)[:top_k]

        # Build a human-readable entry per recommended article.
        recommended_articles = []
        for i, (article_id, score) in enumerate(top_recommendations, 1):
            article_data = data[data['article_id'] == article_id]
            total_favorites = article_data['favorite_count'].sum()
            unique_users = article_data['user_id'].nunique()

            # Title/category from metadata when available.
            title = "未知标题"
            category = "未分类"
            if int(article_id) in articles:
                title = articles[int(article_id)].get('title', "未知标题")
                category = articles[int(article_id)].get('category', "未分类")

            # Explain which signal sources produced this recommendation.
            reason = []
            if article_id in collaborative_recommendations:
                reason.append("用户相似推荐")
            if article_id in content_recommendations:
                reason.append("内容相似")
            if article_id in category_recommendations:
                reason.append("医疗类别匹配")
            if article_id in important_articles:
                reason.append("重要医疗知识")
            if not reason:
                reason.append("系统推荐")

            recommended_articles.append({
                "排名": i,
                "文章ID": int(article_id),
                "标题": title,
                "医疗类别": category,
                "推荐评分": round(score, 3),
                "总浏览次数": int(total_favorites),
                "浏览用户数": int(unique_users),
                "推荐理由": "+".join(reason)
            })

        # Format the final report.
        result = f"为用户{user_id}推荐的前{len(recommended_articles)}篇医疗文章：\n"
        for article in recommended_articles:
            result += f"排名{article['排名']}: 文章ID {article['文章ID']} - 《{article['标题']}》 - 类别: {article['医疗类别']} - 理由: {article['推荐理由']}\n"

        return result

    except Exception as e:
        error_msg = f"❌ 医疗文章推荐生成失败: {str(e)}"
        print(error_msg)
        traceback.print_exc()
        return error_msg