package com.thinvent.recommend.manager.manager.impl;

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch._types.ElasticsearchException;
import co.elastic.clients.elasticsearch.core.BulkRequest;
import co.elastic.clients.elasticsearch.core.BulkResponse;
import co.elastic.clients.elasticsearch.core.bulk.BulkOperation;
import co.elastic.clients.elasticsearch.core.search.Hit;
import com.thinvent.recommend.common.enums.FileExtensionEnums;
import com.thinvent.recommend.common.enums.PositionKeywordEnums;
import com.thinvent.recommend.entity.KbFileInfo;
import com.thinvent.recommend.entity.TEhrUserInfo;
import com.thinvent.recommend.manager.dto.KbFileContentDTO;
import com.thinvent.recommend.manager.dto.RecommendedDocDTO;
import com.thinvent.recommend.manager.manager.JiebaManager;
import com.thinvent.recommend.manager.manager.Text2VecHelperManager;
import com.thinvent.recommend.manager.manager.TfIdfVectorManager;
import com.thinvent.recommend.manager.parser.TikaDocumentParser;
import com.thinvent.recommend.mapper.TEhrUserInfoMapper;
import io.minio.GetObjectArgs;
import io.minio.MinioClient;
import io.minio.StatObjectArgs;
import io.minio.errors.ErrorResponseException;
import org.apache.commons.lang3.StringUtils;
import org.mozilla.universalchardet.UniversalDetector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.util.*;

/**
 * Text-vectorization helper manager. Responsible for downloading files from
 * MinIO, parsing them according to file type, computing TF-IDF vectors, and
 * upserting the resulting dense vectors into Elasticsearch.
 */
@Component
public class Text2VecHelperManagerImpl implements Text2VecHelperManager {

    private static final Logger log = LoggerFactory.getLogger(Text2VecHelperManagerImpl.class);

    /** Synthetic document id used when vectorizing ad-hoc query terms. */
    private static final String QUERY_DOC_ID = "__QUERY__";

    /** Lower-case file extensions (without the dot) that this manager can parse. */
    private static final Set<String> SUPPORTED_EXTS;
    static {
        Set<String> s = new HashSet<>();
        for (FileExtensionEnums ext : FileExtensionEnums.values()) {
            s.add(ext.getExtension());
        }
        SUPPORTED_EXTS = Collections.unmodifiableSet(s);
    }

    private final MinioClient minioClient;
    private final TikaDocumentParser tikaParser;
    private final TfIdfVectorManager tfIdfVectorManager;
    private final ElasticsearchClient esClient;
    private final JiebaManager jiebaManager;

    /** Target Elasticsearch index for document vectors. */
    @Value("${elasticsearch.rest.indexName}")
    private String indexName;

    /** Vector dimensionality; must match the ES dense_vector mapping. */
    private final int vectorDims;

    @Autowired
    public Text2VecHelperManagerImpl(
            @Value("${elasticsearch.vector.dims}") int vectorDims,
            ElasticsearchClient esClient,
            TfIdfVectorManager tfIdfVectorManager,
            MinioClient minioClient,
            TikaDocumentParser tikaParser,
            JiebaManager jiebaManager) {
        this.vectorDims = vectorDims;
        this.esClient = esClient;
        this.tfIdfVectorManager = tfIdfVectorManager;
        this.minioClient = minioClient;
        this.tikaParser = tikaParser;
        this.jiebaManager = jiebaManager;
    }

    /**
     * Downloads and parses each file, returning one content DTO per file that
     * exists in MinIO and has a supported extension. Files that are missing,
     * unsupported, or fail to download are skipped (with a log entry); files
     * that fail to parse are kept with empty content, matching the previous
     * behaviour.
     *
     * @param infos file metadata records to process
     * @return parsed content DTOs, possibly fewer than {@code infos}
     */
    @Override
    public List<KbFileContentDTO> fetchAndParse(List<KbFileInfo> infos) {
        List<KbFileContentDTO> docs = new ArrayList<>();
        for (KbFileInfo info : infos) {
            String path = normalizePath(info.getSaveFullPath());
            if (path == null) {
                continue;
            }

            String ext = extractExtension(path);
            if (!SUPPORTED_EXTS.contains(ext)) {
                log.info("跳过文件[{}]，不支持后缀：{}", info.getId(), ext);
                continue;
            }

            if (!objectExists(info, path)) {
                continue;
            }

            byte[] data = download(info, path);
            if (data == null) {
                continue;
            }

            String content = parseContent(info, data, ext);
            log.info("文档[{}] 内容长度: {}", info.getId(), content.length());
            docs.add(KbFileContentDTO.from(info, content));
        }
        return docs;
    }

    /** Strips a single leading '/'; returns null when the path itself is null. */
    private String normalizePath(String path) {
        if (path == null) {
            return null;
        }
        return path.startsWith("/") ? path.substring(1) : path;
    }

    /** Returns the lower-case extension after the last '.', or "" if there is none. */
    private String extractExtension(String path) {
        int idx = path.lastIndexOf('.');
        if (idx >= 0 && idx < path.length() - 1) {
            return path.substring(idx + 1).toLowerCase();
        }
        return "";
    }

    /** Checks object existence via statObject; any failure is logged and treated as absent. */
    private boolean objectExists(KbFileInfo info, String path) {
        try {
            minioClient.statObject(StatObjectArgs.builder()
                    .bucket(info.getSpaceId())
                    .object(path)
                    .build());
            return true;
        } catch (ErrorResponseException e) {
            if ("NoSuchKey".equals(e.errorResponse().code())) {
                log.warn("文件不存在，跳过：{}/{}", info.getSpaceId(), path);
            } else {
                log.error("检查文件失败：{}/{}，{}", info.getSpaceId(), path, e.getMessage());
            }
            return false;
        } catch (Exception e) {
            log.error("检查文件异常：{}/{}，", info.getSpaceId(), path, e);
            return false;
        }
    }

    /** Downloads the object fully into memory; returns null on failure. */
    private byte[] download(KbFileInfo info, String path) {
        // Was `catch (Throwable)`: narrowed to Exception so JVM Errors
        // (e.g. OutOfMemoryError) are no longer swallowed.
        try (InputStream in = minioClient.getObject(GetObjectArgs.builder()
                .bucket(info.getSpaceId())
                .object(path)
                .build());
             ByteArrayOutputStream buf = new ByteArrayOutputStream()) {

            byte[] b = new byte[4096];
            int len;
            while ((len = in.read(b)) != -1) {
                buf.write(b, 0, len);
            }
            return buf.toByteArray();
        } catch (Exception e) {
            log.error("文档[{}] 下载失败：", info.getId(), e);
            return null;
        }
    }

    /** Parses bytes to text by extension; never returns null (empty string on failure). */
    private String parseContent(KbFileInfo info, byte[] data, String ext) {
        try {
            String content = "txt".equals(ext) ? decodeTxt(data) : tikaParser.parse(data);
            // Guard against a parser returning null, which previously could NPE
            // at the content-length log statement in fetchAndParse.
            return content == null ? StringUtils.EMPTY : content;
        } catch (Exception ex) {
            log.error("文档[{}] 解析失败：", info.getId(), ex);
            return StringUtils.EMPTY;
        }
    }

    /**
     * Delegates TF-IDF computation for the parsed documents.
     *
     * @param docs parsed document contents; may be empty
     * @return per-document sparse vectors keyed by document id; empty map for empty input
     */
    @Override
    public Map<String, Map<String, Double>> computeTfIdf(List<KbFileContentDTO> docs) {
        if (docs.isEmpty()) {
            return Collections.emptyMap();
        }
        Map<String, Map<String, Double>> vectors =
                tfIdfVectorManager.computeTfIdfVectors(docs);
        log.info("TF–IDF 计算完成，共 {} 篇文档。", vectors.size());
        return vectors;
    }

    /**
     * Computes a TF-IDF vector for a list of position keywords by treating
     * them as a single "query document", so the query-side tokenization and
     * weighting match the document-side pipeline.
     *
     * @param terms raw keyword strings
     * @return sparse TF-IDF vector of the query
     * @throws IllegalArgumentException when the keywords tokenize to nothing
     * @throws IllegalStateException    when no vector could be computed
     */
    @Override
    public Map<String, Double> computeTfIdfForTerms(List<String> terms) throws Exception {
        // 1. Join the keywords into one text, then segment with Jieba so the
        //    tokenization is identical to the document side.
        String raw = String.join(" ", terms);
        List<String> tokens = jiebaManager.cutPrecise(raw);
        if (tokens.isEmpty()) {
            throw new IllegalArgumentException("岗位关键词经分词后为空");
        }

        // 2. Wrap the tokens as a single synthetic "query" document.
        KbFileContentDTO queryDoc = new KbFileContentDTO();
        queryDoc.setId(QUERY_DOC_ID);
        queryDoc.setContent(String.join(" ", tokens));

        // 3. Reuse the shared TF-IDF pipeline (single doc: TF, IDF=1, L2 norm).
        Map<String, Map<String, Double>> allVectors =
                tfIdfVectorManager.computeTfIdfVectors(Collections.singletonList(queryDoc));

        // 4. Extract the query vector.
        Map<String, Double> queryVec = allVectors.get(QUERY_DOC_ID);
        if (queryVec == null) {
            throw new IllegalStateException("未能计算出岗位关键词的 TF–IDF 向量");
        }
        return queryVec;
    }

    /**
     * Bulk-upserts the given sparse vectors into Elasticsearch as fixed-length
     * dense vectors. All-zero vectors are skipped; per-item errors are logged
     * rather than thrown.
     *
     * @param vectors sparse vectors keyed by document id; may be null or empty
     */
    @Override
    public void upsertToElasticsearch(Map<String, Map<String, Double>> vectors) {
        if (vectors == null || vectors.isEmpty()) {
            log.warn("没有任何向量，跳过 upsert。");
            return;
        }

        // Index existence is ensured elsewhere (unchanged from the previous
        // implementation).

        List<BulkOperation> ops = new ArrayList<>();
        String now = Instant.now().toString();

        for (Map.Entry<String, Map<String, Double>> entry : vectors.entrySet()) {
            String docId = entry.getKey();
            List<Double> denseVec = toDenseVector(entry.getValue());

            if (denseVec.stream().allMatch(v -> v == 0.0)) {
                log.warn("文档[{}] 全零向量，跳过 upsert。", docId);
                continue;
            }

            // The partial doc carries docId so existing documents stay
            // self-describing; the upsert doc additionally sets createTime
            // for documents created on first write.
            Map<String, Object> updateDoc = new HashMap<>();
            updateDoc.put("docId", docId);
            updateDoc.put("updateTime", now);
            updateDoc.put("vec", denseVec);

            Map<String, Object> upsertDoc = new HashMap<>(updateDoc);
            upsertDoc.put("createTime", now);

            ops.add(BulkOperation.of(b -> b
                    .update(u -> u
                            .index(indexName)
                            .id(docId)
                            .action(a -> a
                                    .doc(updateDoc)
                                    .upsert(upsertDoc)
                            )
                    )
            ));
        }

        if (ops.isEmpty()) {
            log.warn("所有文档均为全零向量，跳过 bulk upsert。");
            return;
        }

        try {
            BulkResponse resp = esClient.bulk(BulkRequest.of(b -> b.operations(ops)));
            resp.items().forEach(item -> {
                if (item.error() != null) {
                    log.error("文档[{}] 更新失败：{}", item.id(), item.error().reason());
                } else {
                    log.info("文档[{}] upsert 成功，操作：{}", item.id(), item.result());
                }
            });
        } catch (Exception ex) {
            log.error("Bulk 批量写入异常：", ex);
        }
    }

    /**
     * Detects the charset of a txt payload and decodes it, falling back to
     * UTF-8 when detection fails or the detected name is not supported by the
     * JVM (UniversalDetector can report charsets Java cannot load, which
     * previously made Charset.forName throw).
     */
    private String decodeTxt(byte[] data) {
        UniversalDetector detector = new UniversalDetector(null);
        detector.handleData(data, 0, data.length);
        detector.dataEnd();
        String charset = detector.getDetectedCharset();
        detector.reset();
        if (charset == null) {
            return new String(data, StandardCharsets.UTF_8);
        }
        try {
            return new String(data, Charset.forName(charset));
        } catch (IllegalArgumentException e) {
            // Covers both IllegalCharsetNameException and UnsupportedCharsetException.
            log.warn("字符集[{}]不受支持，回退到 UTF-8。", charset);
            return new String(data, StandardCharsets.UTF_8);
        }
    }

    /**
     * Expands a sparse {index-as-string -> weight} map into a fixed-length
     * dense vector of {@code vectorDims} entries; missing indices become 0.0.
     */
    private List<Double> toDenseVector(Map<String, Double> vecMap) {
        List<Double> arr = new ArrayList<>(vectorDims);
        for (int i = 0; i < vectorDims; i++) {
            arr.add(vecMap.getOrDefault(String.valueOf(i), 0.0));
        }
        return arr;
    }
}
