package com.adnaan.backend.service.impl;

import com.adnaan.backend.entity.Article;
import com.adnaan.backend.entity.UserCollect;
import com.adnaan.backend.entity.ViewHistory;
import com.adnaan.backend.entity.dto.OptionDto;
import com.adnaan.backend.mapper.ArticleMapper;
import com.adnaan.backend.mapper.UserCollectMapper;
import com.adnaan.backend.mapper.ViewHistoryMapper;
import com.adnaan.backend.service.ArticleRecommendService;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Service;

import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

@Slf4j
@Service
public class ArticleRecommendServiceImpl implements ArticleRecommendService {
    private static final String USER_INTERESTS_CACHE_KEY = "user:interests:";
    private static final int BATCH_SIZE = 100;
    /** Interest-profile cache TTL, in hours. */
    private static final long CACHE_EXPIRATION = 1;
    /** Weight contributed by a single collect (bookmark) action. */
    private static final double COLLECT_WEIGHT = 2.0;
    /** Blend weights for the similarity score: tag overlap dominates, title overlap refines. */
    private static final double TAG_SIMILARITY_WEIGHT = 0.7;
    private static final double TITLE_SIMILARITY_WEIGHT = 0.3;
    /** Per-day exponential decay rate used by the time-decay fallback. */
    private static final double TIME_DECAY_RATE = 0.1;

    @Resource
    private UserCollectMapper userCollectMapper;
    @Resource
    private ViewHistoryMapper viewHistoryMapper;
    @Resource
    private ArticleMapper articleMapper;
    @Resource
    private RedisTemplate<String, Object> redisTemplate;

    /**
     * Recommends up to {@code limit} articles for a user, scored by content
     * similarity (tags + title) against the articles the user has collected
     * or viewed. Articles the user already interacted with are excluded.
     *
     * <p>BUGFIX: the previous implementation sorted each batch independently
     * and then truncated the concatenation, so when more than one batch was
     * scanned the result was not the globally highest-scoring set. Candidates
     * are now sorted once across all batches before the limit is applied.
     *
     * @param userId user id whose interest profile drives the scoring
     * @param limit  maximum number of articles to return
     * @return recommended articles, highest score first (possibly empty)
     */
    @Override
    public List<Article> recommendArticles(Long userId, int limit) {
        if (limit <= 0) {
            return new ArrayList<>();
        }

        // 1. User interest profile: articleId -> interaction weight.
        Map<Long, Double> articleWeights = getUserInterests(userId);

        // 2. Score candidates batch by batch until we have at least `limit`.
        List<Article> candidates = new ArrayList<>();
        int offset = 0;
        while (candidates.size() < limit) {
            List<Article> articleBatch = getArticleBatch(offset, BATCH_SIZE);
            if (articleBatch.isEmpty()) {
                break;
            }

            // Index the batch once so scoring does a map lookup per interest
            // entry instead of a linear scan of the whole batch (was O(n^2)).
            Map<Long, Article> batchById = articleBatch.stream()
                    .collect(Collectors.toMap(Article::getId, a -> a, (a, b) -> a));

            for (Article article : articleBatch) {
                if (articleWeights.containsKey(article.getId())) {
                    continue; // already collected/viewed — do not re-recommend
                }
                article.setRecommendScore(
                        calculateSimilarityScore(article, articleWeights, batchById));
                candidates.add(article);
            }

            offset += BATCH_SIZE;
        }

        // Single global sort across every scored batch.
        candidates.sort(Comparator.comparingDouble(Article::getRecommendScore).reversed());

        // 3. Fallback: no unseen candidates but the user has a profile —
        //    re-rank a recency window with an exponential time-decay factor.
        if (candidates.isEmpty() && !articleWeights.isEmpty()) {
            candidates = getArticlesWithTimeDecay(articleWeights, limit);
        }

        return candidates.stream()
                .limit(limit)
                .collect(Collectors.toList());
    }

    /**
     * Builds (or loads from cache) the user's interest profile: a map from
     * article id to an accumulated interaction weight. Each collect adds
     * {@link #COLLECT_WEIGHT}; each view adds its reading progress scaled
     * to [0, 1] ({@code process} is assumed to be a 0-100 percentage).
     *
     * <p>NOTE(review): the cached value is cast straight to
     * {@code Map<Long, Double>}. With a JSON-based Redis serializer, map keys
     * typically round-trip as String/Integer rather than Long, which would
     * make {@code containsKey(articleId)} silently fail — confirm the
     * configured serializer preserves Long keys.
     *
     * @param userId user id
     * @return interest profile; empty map when the user has no history
     */
    private Map<Long, Double> getUserInterests(Long userId) {
        String cacheKey = USER_INTERESTS_CACHE_KEY + userId;

        // Cache hit: return the profile as-is.
        @SuppressWarnings("unchecked")
        Map<Long, Double> cachedInterests = (Map<Long, Double>) redisTemplate.opsForValue().get(cacheKey);
        if (cachedInterests != null) {
            return cachedInterests;
        }

        Map<Long, Double> articleWeights = new HashMap<>();

        // Collects and view history are the only interaction signals used.
        List<UserCollect> userCollects = userCollectMapper.selectList(
                new LambdaQueryWrapper<UserCollect>()
                        .eq(UserCollect::getUserId, userId)
        );

        List<ViewHistory> viewHistories = viewHistoryMapper.selectList(
                new LambdaQueryWrapper<ViewHistory>()
                        .eq(ViewHistory::getUserId, userId)
        );

        userCollects.forEach(collect ->
                articleWeights.merge(collect.getArticleId(), COLLECT_WEIGHT, Double::sum)
        );

        viewHistories.forEach(history ->
                articleWeights.merge(history.getArticleId(),
                        (history.getProcess() / 100.0), Double::sum)
        );

        // Cache the freshly built profile for CACHE_EXPIRATION hours.
        redisTemplate.opsForValue().set(cacheKey, articleWeights, CACHE_EXPIRATION, TimeUnit.HOURS);

        return articleWeights;
    }

    /**
     * Loads one offset/size page of articles with their tags attached.
     *
     * <p>NOTE(review): tags are fetched with one query per article (N+1).
     * If {@code ArticleMapper} can batch-load tags by a list of ids, this
     * should be switched to a single query.
     *
     * @param offset    zero-based row offset
     * @param batchSize page size
     * @return articles in the page; tags are never null
     */
    private List<Article> getArticleBatch(int offset, int batchSize) {
        // LIMIT clause is built from int parameters only — no injection risk.
        List<Article> articles = articleMapper.selectList(
                new LambdaQueryWrapper<Article>()
                        .last(String.format("LIMIT %d, %d", offset, batchSize))
        );

        articles.forEach(article -> {
            List<OptionDto> tags = articleMapper.getTags(article.getId());
            article.setTags(tags != null ? tags : new ArrayList<>());
        });

        return articles;
    }

    /**
     * Fallback ranking: scores a window of {@code limit * 2} articles and
     * multiplies each score by an exponential time-decay factor so newer
     * articles rank higher.
     *
     * @param articleWeights user interest profile
     * @param limit          maximum number of articles to return
     * @return up to {@code limit} articles, highest decayed score first
     */
    private List<Article> getArticlesWithTimeDecay(Map<Long, Double> articleWeights, int limit) {
        List<Article> window = getArticleBatch(0, limit * 2);

        // Index once for O(1) lookups while scoring (see recommendArticles).
        Map<Long, Article> windowById = window.stream()
                .collect(Collectors.toMap(Article::getId, a -> a, (a, b) -> a));

        for (Article article : window) {
            double score = calculateSimilarityScore(article, articleWeights, windowById);
            long daysSinceCreation = TimeUnit.MILLISECONDS.toDays(
                    System.currentTimeMillis() - article.getCreateTime().getTime());
            // exp(-rate * days): weight 1.0 today, halves roughly every 7 days.
            double timeDecay = Math.exp(-TIME_DECAY_RATE * daysSinceCreation);
            article.setRecommendScore(score * timeDecay);
        }

        return window.stream()
                .sorted(Comparator.comparingDouble(Article::getRecommendScore).reversed())
                .limit(limit)
                .collect(Collectors.toList());
    }

    /**
     * Scores a candidate against the user's interest profile: for every
     * interest article present in the candidate window, adds
     * {@code (tagJaccard * 0.7 + titleJaccard * 0.3) * interestWeight}.
     *
     * @param targetArticle candidate being scored
     * @param userInterests articleId -> interaction weight
     * @param articlesById  candidate window indexed by article id
     * @return accumulated similarity score (0.0 when no interest article is in the window)
     */
    private double calculateSimilarityScore(Article targetArticle,
                                            Map<Long, Double> userInterests,
                                            Map<Long, Article> articlesById) {
        Set<String> targetTags = tagValues(targetArticle);

        double score = 0.0;
        for (Map.Entry<Long, Double> entry : userInterests.entrySet()) {
            Article userArticle = articlesById.get(entry.getKey());
            if (userArticle == null) {
                continue; // interest article is outside the current window
            }

            double tagSimilarity = calculateJaccardSimilarity(targetTags, tagValues(userArticle));
            double titleSimilarity = calculateTitleSimilarity(
                    targetArticle.getTitle(),
                    userArticle.getTitle()
            );

            score += (tagSimilarity * TAG_SIMILARITY_WEIGHT
                    + titleSimilarity * TITLE_SIMILARITY_WEIGHT) * entry.getValue();
        }

        return score;
    }

    /**
     * Extracts an article's tag values as a set; empty when tags are absent.
     */
    private Set<String> tagValues(Article article) {
        if (article.getTags() == null) {
            return Collections.emptySet();
        }
        return article.getTags().stream()
                .map(OptionDto::getValue)
                .collect(Collectors.toSet());
    }

    /**
     * Jaccard similarity |A ∩ B| / |A ∪ B| of two string sets.
     * Two empty sets are deliberately scored 0.0 (no signal), not 1.0.
     *
     * @param set1 first set
     * @param set2 second set
     * @return similarity in [0, 1]
     */
    private double calculateJaccardSimilarity(Set<String> set1, Set<String> set2) {
        if (set1.isEmpty() && set2.isEmpty()) return 0.0;

        Set<String> union = new HashSet<>(set1);
        union.addAll(set2);

        Set<String> intersection = new HashSet<>(set1);
        intersection.retainAll(set2);

        return (double) intersection.size() / union.size();
    }

    /**
     * Title similarity: Jaccard over the sets of distinct characters of each
     * title (order-insensitive; suited to CJK titles where each character
     * carries meaning).
     *
     * @param title1 first title (nullable)
     * @param title2 second title (nullable)
     * @return similarity in [0, 1]; 0.0 when either title is null
     */
    private double calculateTitleSimilarity(String title1, String title2) {
        if (title1 == null || title2 == null) {
            return 0.0;
        }
        Set<String> chars1 = title1.chars()
                .mapToObj(ch -> String.valueOf((char) ch))
                .collect(Collectors.toSet());
        Set<String> chars2 = title2.chars()
                .mapToObj(ch -> String.valueOf((char) ch))
                .collect(Collectors.toSet());

        return calculateJaccardSimilarity(chars1, chars2);
    }
}