package com.example.demo.simple.service;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.data.redis.connection.RedisConnection;
import org.springframework.data.redis.connection.zset.Tuple;
import org.springframework.data.redis.core.*;
import org.springframework.stereotype.Component;

import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

@Component
public class HighPerformanceRedisSearchService {
    private static final Logger logger = LoggerFactory.getLogger(HighPerformanceRedisSearchService.class);

    // Key namespaces.
    public static final String DATA_PREFIX = "data:";                    // original JSON payload: data:<business>:<id>
    public static final String INDEX_PREFIX = "idx:";                    // inverted-index set: idx:<business>:<field>:<token>
    public static final String TEMP_SCORE_PREFIX = "temp:search:score:"; // per-request scoring ZSET
    public static final int EXPIRE_TIME = 24 * 3600;                     // TTL (seconds) for data and index keys
    private static final int SCAN_COUNT = 500;                           // COUNT hint per SCAN iteration

    // Scoring weights.
    private static final int EXACT_MATCH_SCORE = 50;   // bonus when an indexed token equals the whole query
    private static final int FULL_WORD_SCORE = 20;     // weight of the query string itself
    private static final int SUB_WORD_SCORE = 5;       // weight of multi-character sub-words
    private static final int SINGLE_CHAR_SCORE = 0;    // single characters aid recall but add no score
    private static final int MIN_SCORE_THRESHOLD = 5;  // kept low to avoid filtering out valid hits

    // Deliberately small stop-word set so recall stays high.
    private static final Set<String> STOP_WORDS = new HashSet<>(Arrays.asList(
        "的", "了", "是", "在", "和", "呢", "呀", "啊", "哦"
    ));

    private final RedisTemplate<String, String> redisTemplate;
    private final ObjectMapper objectMapper;

    // JiebaSegmenter is not documented as thread-safe, so each thread
    // gets its own instance via ThreadLocal.
    private final ThreadLocal<JiebaSegmenter> segmenterLocal = ThreadLocal.withInitial(JiebaSegmenter::new);

    public HighPerformanceRedisSearchService(RedisTemplate<String, String> redisTemplate, ObjectMapper objectMapper) {
        this.redisTemplate = redisTemplate;
        this.objectMapper = objectMapper;
    }

    /**
     * Saves the original data and builds the inverted index for it.
     *
     * @param business logical namespace the record belongs to
     * @param id       positive record id
     * @param fieldMap field name -> field value; every non-blank value is tokenized and indexed
     * @throws JsonProcessingException  if the field map cannot be serialized to JSON
     * @throws IllegalArgumentException if any parameter fails validation
     */
    public void save(String business, Long id, Map<String, String> fieldMap) throws JsonProcessingException {
        logger.debug("保存数据 - business: {}, id: {}, 字段: {}", business, id, fieldMap);
        validateParams(business, id, fieldMap);
        saveOriginalData(business, id, fieldMap);
        buildIndex(business, id, fieldMap);
    }

    /**
     * Searches {@code business} for {@code query} and returns one page of results
     * ordered by descending relevance score.
     *
     * <p>Pipeline: tokenize the query with weights, accumulate per-id scores into a
     * request-scoped ZSET, boost exact matches, filter by the "core" token, then page.
     * Any exception is logged and an empty list is returned.
     *
     * @param business logical namespace to search in
     * @param query    raw user query
     * @param page     1-based page number (values below 1 are clamped to the first page)
     * @param size     page size
     * @return scored results for the requested page, possibly empty; never {@code null}
     */
    public List<ScoredResult> searchWithScore(String business, String query, int page, int size) {
        long start = System.currentTimeMillis();
        // UUID-suffixed key: each request scores into its own private ZSET.
        String tempScoreKey = TEMP_SCORE_PREFIX + UUID.randomUUID();
        logger.debug("开始搜索 - business: {}, query: {}, 页码: {}, 每页大小: {}",
                   business, query, page, size);

        try {
            // 1. Tokenize the query and assign a weight to each token.
            Map<String, Integer> tokenWeights = getTokenWeights(query);
            logger.debug("分词结果: {}", tokenWeights.keySet());
            if (tokenWeights.isEmpty()) {
                logger.debug("未获取到有效分词，返回空结果");
                return Collections.emptyList();
            }

            // 2. Extract the core token used as a hard recall filter.
            String coreToken = extractCoreToken(query);
            logger.debug("提取核心词: {}", coreToken);
            if (coreToken == null || coreToken.isEmpty()) {
                logger.debug("未提取到有效核心词，返回空结果");
                return Collections.emptyList();
            }

            // 3. Accumulate weighted scores per id into the temp ZSET.
            batchCalculateScores(business, tokenWeights, tempScoreKey);

            Long tempCount = redisTemplate.opsForZSet().zCard(tempScoreKey);
            logger.debug("计算分数后，临时集合数量: {}", tempCount);
            if (tempCount == null || tempCount == 0) {
                return Collections.emptyList();
            }

            // 4. Extra bonus for ids whose indexed token equals the whole query.
            addExactMatchScoreInRedis(business, query, tempScoreKey);

            // 5. Keep only ids that also match the core token.
            Set<String> coreIds = getIdsByToken(business, coreToken);
            logger.debug("核心词[{}]匹配到的ID数量: {}", coreToken, coreIds.size());
            if (coreIds.isEmpty()) {
                return Collections.emptyList();
            }
            filterByCoreToken(tempScoreKey, coreIds);

            Long afterFilterCount = redisTemplate.opsForZSet().zCard(tempScoreKey);
            logger.debug("核心词过滤后，临时集合数量: {}", afterFilterCount);
            if (afterFilterCount == null || afterFilterCount == 0) {
                return Collections.emptyList();
            }

            // 6. Page through the surviving, score-ordered ids.
            List<ScoredResult> results = getPagedResults(tempScoreKey, page, size);
            logger.debug("搜索完成，返回结果数量: {}", results.size());
            return results;

        } catch (Exception e) {
            logger.error("搜索过程发生异常", e);
            return Collections.emptyList();
        } finally {
            // The scoring ZSET is request-scoped (fresh UUID key) and fully
            // consumed above, so delete it immediately instead of leaving it
            // to expire and pile up under load.
            redisTemplate.delete(tempScoreKey);
            logger.debug("搜索总耗时: {}ms", System.currentTimeMillis() - start);
        }
    }

    /**
     * Loads the original field map previously stored by {@link #save}.
     *
     * @param business logical namespace
     * @param id       record id
     * @return the stored field map, or an empty map when the key is missing or on error
     */
    public Map<String, String> get(String business, Long id) {
        try {
            String dataKey = DATA_PREFIX + business + ":" + id;
            String json = redisTemplate.opsForValue().get(dataKey);
            logger.debug("获取原始数据 - key: {}, 存在: {}", dataKey, json != null);
            // TypeReference avoids the raw-typed Map.class unchecked conversion.
            return json != null
                ? objectMapper.readValue(json, new TypeReference<Map<String, String>>() {})
                : Collections.emptyMap();
        } catch (Exception e) {
            logger.error("获取原始数据异常", e);
            return Collections.emptyMap();
        }
    }

    /**
     * Picks the "core" token of the query: the last multi-character, non-stop-word
     * segment produced by jieba SEARCH mode. Falls back to the raw query when
     * segmentation fails or yields no candidate.
     */
    private String extractCoreToken(String query) {
        try {
            List<SegToken> tokens = segmenterLocal.get().process(query, JiebaSegmenter.SegMode.SEARCH);
            // Walk backwards so the rightmost meaningful word wins.
            for (int i = tokens.size() - 1; i >= 0; i--) {
                String word = tokens.get(i).word;
                if (word.length() > 1 && !STOP_WORDS.contains(word)) {
                    return word;
                }
            }
        } catch (Exception e) {
            logger.error("提取核心词异常", e);
        }
        // Fallback: use the raw query so the search can still proceed.
        return query;
    }

    /**
     * Collects every id indexed under {@code token} across all fields of
     * {@code business}, using SCAN plus a pipelined SMEMBERS fan-out.
     */
    private Set<String> getIdsByToken(String business, String token) {
        Set<String> ids = new HashSet<>();
        String pattern = INDEX_PREFIX + business + ":*:" + token;
        logger.debug("根据分词获取ID - pattern: {}", pattern);

        Set<String> keys = scanKeys(pattern);
        logger.debug("匹配到的索引键数量: {}", keys.size());

        if (keys.isEmpty()) {
            return ids;
        }

        List<Object> results = redisTemplate.executePipelined((RedisCallback<Object>) connection -> {
            for (String key : keys) {
                connection.sMembers(key.getBytes(StandardCharsets.UTF_8));
            }
            return null;
        }, redisTemplate.getStringSerializer());

        for (Object result : results) {
            if (result instanceof Set) {
                @SuppressWarnings("unchecked")
                Set<String> keyIds = (Set<String>) result;
                ids.addAll(keyIds);
                logger.debug("从索引键获取到ID数量: {}", keyIds.size());
            }
        }
        return ids;
    }

    /**
     * Scans all index keys matching the weighted tokens and accumulates the
     * corresponding weights into the temp scoring ZSET.
     */
    private void batchCalculateScores(String business, Map<String, Integer> tokenWeights, String tempScoreKey) {
        List<String> patterns = tokenWeights.keySet().stream()
                .map(token -> INDEX_PREFIX + business + ":*:" + token)
                .collect(Collectors.toList());
        logger.debug("批量计算分数 - 匹配模式数量: {}", patterns.size());

        Set<String> allMatchedKeys = scanMultiPatterns(patterns);
        logger.debug("匹配到的所有索引键数量: {}", allMatchedKeys.size());
        if (allMatchedKeys.isEmpty()) {
            return;
        }

        batchUpdateScores(allMatchedKeys, tokenWeights, tempScoreKey);
    }

    /**
     * Fetches the member ids of every index key (one pipelined round trip) and
     * then ZINCRBYs each id by its token's weight (second pipelined round trip).
     */
    private void batchUpdateScores(Set<String> keys, Map<String, Integer> tokenWeights, String tempScoreKey) {
        // Snapshot the keys into a list so the pipelined replies can be zipped
        // back to their keys by index, instead of relying on HashSet iteration
        // order being identical across two traversals.
        List<String> orderedKeys = new ArrayList<>(keys);
        List<Object> results = redisTemplate.executePipelined((RedisCallback<Object>) connection -> {
            for (String key : orderedKeys) {
                connection.sMembers(key.getBytes(StandardCharsets.UTF_8));
            }
            return null;
        }, redisTemplate.getStringSerializer());

        Map<String, Set<String>> keyToIds = new HashMap<>(orderedKeys.size());
        for (int i = 0; i < results.size() && i < orderedKeys.size(); i++) {
            Object result = results.get(i);
            if (result instanceof Set) {
                @SuppressWarnings("unchecked")
                Set<String> memberIds = (Set<String>) result;
                keyToIds.put(orderedKeys.get(i), memberIds);
            }
        }

        logger.debug("批量更新分数 - 处理键值对数量: {}", keyToIds.size());
        redisTemplate.executePipelined((RedisCallback<Object>) connection -> {
            byte[] tempKeyBytes = tempScoreKey.getBytes(StandardCharsets.UTF_8);
            for (Map.Entry<String, Set<String>> entry : keyToIds.entrySet()) {
                String token = extractTokenFromKey(entry.getKey());
                int weight = tokenWeights.getOrDefault(token, 0);
                if (weight <= 0) continue; // zero-weight tokens (single chars) add nothing

                for (String id : entry.getValue()) {
                    connection.zIncrBy(
                        tempKeyBytes,
                        weight,
                        id.getBytes(StandardCharsets.UTF_8)
                    );
                }
            }
            return null;
        });
    }

    /**
     * Runs a SCAN for each pattern on a single connection and returns the union
     * of all matching key names.
     */
    private Set<String> scanMultiPatterns(List<String> patterns) {
        Set<String> allKeys = new HashSet<>();
        redisTemplate.execute((RedisCallback<Void>) connection -> {
            for (String pattern : patterns) {
                ScanOptions options = ScanOptions.scanOptions()
                        .match(pattern)
                        .count(SCAN_COUNT)
                        .build();
                // Cursor holds a live server-side resource; close it deterministically.
                try (Cursor<byte[]> cursor = connection.scan(options)) {
                    while (cursor.hasNext()) {
                        allKeys.add(new String(cursor.next(), StandardCharsets.UTF_8));
                    }
                }
            }
            return null;
        });
        return allKeys;
    }

    /**
     * Adds {@link #EXACT_MATCH_SCORE} to every id indexed under a token equal to
     * the whole query string.
     */
    private void addExactMatchScoreInRedis(String business, String query, String tempScoreKey) {
        String pattern = INDEX_PREFIX + business + ":*:" + query;
        Set<String> exactKeys = scanKeys(pattern);
        logger.debug("完全匹配的索引键数量: {}", exactKeys.size());
        if (exactKeys.isEmpty()) return;

        List<Object> results = redisTemplate.executePipelined((RedisCallback<Object>) connection -> {
            for (String key : exactKeys) {
                connection.sMembers(key.getBytes(StandardCharsets.UTF_8));
            }
            return null;
        }, redisTemplate.getStringSerializer());

        redisTemplate.executePipelined((RedisCallback<Object>) connection -> {
            byte[] tempKeyBytes = tempScoreKey.getBytes(StandardCharsets.UTF_8);
            for (Object result : results) {
                if (result instanceof Set) {
                    @SuppressWarnings("unchecked")
                    Set<String> ids = (Set<String>) result;
                    for (String id : ids) {
                        connection.zIncrBy(
                            tempKeyBytes,
                            EXACT_MATCH_SCORE,
                            id.getBytes(StandardCharsets.UTF_8)
                        );
                    }
                }
            }
            return null;
        });
    }

    /**
     * Removes from the temp ZSET every member whose id is not in {@code coreIds},
     * deleting in batches of 100 to bound command size.
     */
    private void filterByCoreToken(String tempScoreKey, Set<String> coreIds) {
        redisTemplate.execute((RedisCallback<Void>) connection -> {
            byte[] tempKeyBytes = tempScoreKey.getBytes(StandardCharsets.UTF_8);
            List<byte[]> toRemove = new ArrayList<>(100);

            // Cursor holds a live server-side resource; close it deterministically.
            try (Cursor<Tuple> cursor = connection.zScan(
                    tempKeyBytes,
                    ScanOptions.scanOptions().count(SCAN_COUNT).build())) {
                while (cursor.hasNext()) {
                    byte[] idBytes = cursor.next().getValue();
                    String id = new String(idBytes, StandardCharsets.UTF_8);
                    if (!coreIds.contains(id)) {
                        toRemove.add(idBytes);
                        if (toRemove.size() >= 100) {
                            connection.zRem(tempKeyBytes, toRemove.toArray(new byte[0][]));
                            toRemove.clear();
                        }
                    }
                }
            }

            if (!toRemove.isEmpty()) {
                connection.zRem(tempKeyBytes, toRemove.toArray(new byte[0][]));
            }
            return null;
        });
    }

    /**
     * Collects all key names matching {@code pattern} via SCAN (never KEYS,
     * which would block the server).
     */
    private Set<String> scanKeys(String pattern) {
        Set<String> keys = new HashSet<>();
        redisTemplate.execute((RedisCallback<Void>) connection -> {
            ScanOptions options = ScanOptions.scanOptions()
                    .match(pattern)
                    .count(SCAN_COUNT)
                    .build();
            // Cursor holds a live server-side resource; close it deterministically.
            try (Cursor<byte[]> cursor = connection.scan(options)) {
                while (cursor.hasNext()) {
                    keys.add(new String(cursor.next(), StandardCharsets.UTF_8));
                }
            }
            return null;
        });
        return keys;
    }

    /**
     * Returns one page of scored results from the temp ZSET, highest score first.
     * Only entries scoring at least {@link #MIN_SCORE_THRESHOLD} are returned.
     *
     * @param page 1-based page number; values below 1 fall back to the first page
     */
    private List<ScoredResult> getPagedResults(String tempScoreKey, int page, int size) {
        long offset = (long) (page - 1) * size;
        if (offset < 0) offset = 0;

        logger.debug("分页查询 - 偏移量: {}, 每页大小: {}", offset, size);
        Set<ZSetOperations.TypedTuple<String>> topResults = redisTemplate.opsForZSet()
                .reverseRangeByScoreWithScores(
                        tempScoreKey,
                        MIN_SCORE_THRESHOLD,
                        Double.POSITIVE_INFINITY,
                        offset,
                        size
                );

        if (topResults == null || topResults.isEmpty()) {
            return Collections.emptyList();
        }

        return topResults.stream()
                // Guard against null members/scores before unboxing and parsing.
                .filter(tuple -> tuple.getValue() != null && tuple.getScore() != null)
                .map(tuple -> new ScoredResult(
                        Long.parseLong(tuple.getValue()),
                        tuple.getScore().intValue()
                ))
                .collect(Collectors.toList());
    }

    /**
     * Extracts the token part from an index key of the form
     * {@code idx:<business>:<field>:<token>}.
     */
    private String extractTokenFromKey(String key) {
        // Limit 4 keeps tokens that themselves contain ':' intact.
        String[] parts = key.split(":", 4);
        return parts.length >= 4 ? parts[3] : "";
    }

    /**
     * Builds the token -> weight map for a query: the full query gets
     * {@link #FULL_WORD_SCORE}, multi-char sub-words {@link #SUB_WORD_SCORE},
     * and single characters {@link #SINGLE_CHAR_SCORE} (recall only).
     */
    private Map<String, Integer> getTokenWeights(String query) {
        Map<String, Integer> tokenWeights = new HashMap<>();

        // 1. The whole query string.
        tokenWeights.put(query, FULL_WORD_SCORE);

        // 2. Multi-character sub-words from SEARCH-mode segmentation.
        List<SegToken> subTokens = segmenterLocal.get().process(query, JiebaSegmenter.SegMode.SEARCH);
        for (SegToken token : subTokens) {
            String word = token.word;
            if (word.length() > 1 && !word.equals(query) && !STOP_WORDS.contains(word)) {
                tokenWeights.put(word, SUB_WORD_SCORE);
            }
        }

        // 3. Individual characters (weight 0: they widen recall, not score).
        if (query.length() > 1) {
            for (char c : query.toCharArray()) {
                String charStr = String.valueOf(c);
                if (!STOP_WORDS.contains(charStr)) {
                    tokenWeights.put(charStr, SINGLE_CHAR_SCORE);
                }
            }
        }

        return tokenWeights;
    }

    /**
     * Builds the inverted index: for every non-blank field value, adds the record
     * id to one set per token under {@code idx:<business>:<field>:<token>},
     * refreshing the key's TTL each time.
     */
    private void buildIndex(String business, Long id, Map<String, String> fieldMap) {
        String idStr = id.toString();

        for (Map.Entry<String, String> entry : fieldMap.entrySet()) {
            String fieldName = entry.getKey();
            String fieldValue = entry.getValue();

            if (fieldValue == null || fieldValue.trim().isEmpty()) {
                continue;
            }

            Set<String> tokenSet = getIndexTokens(fieldValue);
            logger.debug("字段[{}]的分词结果: {}", fieldName, tokenSet);

            for (String token : tokenSet) {
                String indexKey = INDEX_PREFIX + business + ":" + fieldName + ":" + token;
                redisTemplate.opsForSet().add(indexKey, idStr);
                redisTemplate.expire(indexKey, EXPIRE_TIME, TimeUnit.SECONDS);
                logger.debug("构建索引 - key: {}, id: {}", indexKey, idStr);
            }
        }
    }

    /**
     * Tokenizes {@code text} with both INDEX and SEARCH modes and returns the
     * union, so index-time tokens cover both granularities used at query time.
     */
    private Set<String> getIndexTokens(String text) {
        Set<String> tokenSet = new HashSet<>();

        List<SegToken> indexTokens = segmenterLocal.get().process(text, JiebaSegmenter.SegMode.INDEX);
        indexTokens.forEach(token -> addValidToken(tokenSet, token.word));

        List<SegToken> searchTokens = segmenterLocal.get().process(text, JiebaSegmenter.SegMode.SEARCH);
        searchTokens.forEach(token -> addValidToken(tokenSet, token.word));

        return tokenSet;
    }

    /**
     * Adds {@code word} to the token set unless it is blank or a stop word.
     * Single characters are kept deliberately to raise match probability.
     */
    private void addValidToken(Set<String> tokenSet, String word) {
        if (word == null || word.trim().isEmpty()) {
            return;
        }
        if (STOP_WORDS.contains(word)) {
            return;
        }
        tokenSet.add(word);
    }

    /**
     * Validates the save() arguments.
     *
     * @throws IllegalArgumentException when business is blank, id is null or
     *                                  non-positive, or fieldMap is null/empty
     */
    private void validateParams(String business, Long id, Map<String, String> fieldMap) {
        if (business == null || business.trim().isEmpty()) {
            throw new IllegalArgumentException("业务类型不能为空");
        }
        if (id == null || id <= 0) {
            throw new IllegalArgumentException("ID必须为正整数");
        }
        if (fieldMap == null || fieldMap.isEmpty()) {
            throw new IllegalArgumentException("字段映射不能为空");
        }
    }

    /**
     * Serializes the field map to JSON and stores it under
     * {@code data:<business>:<id>} with the standard TTL.
     */
    private void saveOriginalData(String business, Long id, Map<String, String> fieldMap) throws JsonProcessingException {
        String dataKey = DATA_PREFIX + business + ":" + id;
        String json = objectMapper.writeValueAsString(fieldMap);
        redisTemplate.opsForValue().set(dataKey, json, EXPIRE_TIME, TimeUnit.SECONDS);
        logger.debug("保存原始数据 - key: {}", dataKey);
    }

    /**
     * Immutable search hit: a record id paired with its relevance score.
     */
    public static class ScoredResult {
        private final Long id;
        private final Integer score;

        public ScoredResult(Long id, Integer score) {
            this.id = id;
            this.score = score;
        }

        public Long getId() { return id; }
        public Integer getScore() { return score; }

        @Override
        public String toString() {
            return "ScoredResult{id=" + id + ", score=" + score + "}";
        }
    }
}
    