package org.csu.mybigpro.service;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.csu.mybigpro.DTO.PlagiarismReportDto;
import org.csu.mybigpro.DTO.SimilarityDetailDto;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;

@Service
public class PlagiarismService {

    private static final Logger log = LoggerFactory.getLogger(PlagiarismService.class);

    private final AiService aiService;
    private final WordParserService wordParserService;

    // Jackson ObjectMapper is thread-safe once configured; one shared instance is enough.
    private final ObjectMapper objectMapper = new ObjectMapper();

    /**
     * Constructor injection (instead of field injection) so the dependencies are
     * final and the service can be instantiated without a Spring container in tests.
     */
    @Autowired
    public PlagiarismService(AiService aiService, WordParserService wordParserService) {
        this.aiService = aiService;
        this.wordParserService = wordParserService;
    }

    /**
     * Runs an AI-backed plagiarism comparison over every pair of uploaded documents.
     * <p>
     * Files that cannot be parsed (or contain no text) are skipped with an error log;
     * the remaining documents are compared pairwise (O(n^2) pairs) and one report is
     * produced per pair.
     *
     * @param files uploaded Word documents; at least two are required
     * @return one report per document pair, sorted by overall score descending
     *         (highest plagiarism risk first); empty list if fewer than two files
     *         yielded usable text
     * @throws IllegalArgumentException if {@code files} is null or has fewer than two entries
     * @throws Exception                propagated from the AI analysis pipeline
     */
    public List<PlagiarismReportDto> analyzeSubmissions(List<MultipartFile> files) throws Exception {
        if (files == null || files.size() < 2) {
            throw new IllegalArgumentException("请至少上传两份作业进行比对。");
        }

        List<Map.Entry<String, String>> documentTexts = new ArrayList<>(files.size());
        for (MultipartFile file : files) {
            // MultipartFile.getOriginalFilename() may be null; Map.entry rejects null keys,
            // so substitute a placeholder instead of losing the file to a swallowed NPE.
            String name = file.getOriginalFilename() != null ? file.getOriginalFilename() : "unnamed";
            try {
                String text = wordParserService.extractText(file);
                if (text != null && !text.isBlank()) {
                    documentTexts.add(Map.entry(name, text));
                }
            } catch (Exception e) {
                // Pass the throwable itself so the full stack trace is logged, not just the message.
                log.error("读取文件 '{}' 时出错", name, e);
            }
        }

        if (documentTexts.size() < 2) {
            return new ArrayList<>();
        }

        List<PlagiarismReportDto> reports = new ArrayList<>();
        for (int i = 0; i < documentTexts.size(); i++) {
            for (int j = i + 1; j < documentTexts.size(); j++) {
                Map.Entry<String, String> docA = documentTexts.get(i);
                Map.Entry<String, String> docB = documentTexts.get(j);
                reports.add(analyzePair(docA.getKey(), docA.getValue(), docB.getKey(), docB.getValue()));
            }
        }

        reports.sort(Comparator.comparingDouble(PlagiarismReportDto::getOverallScore).reversed());
        return reports;
    }

    /**
     * Asks the AI model to compare two documents sentence-by-sentence and builds
     * the report for that pair directly, avoiding the previous untyped
     * {@code Map<String, Object>} round trip and its unchecked cast.
     * <p>
     * The JSON field names requested in the prompt ({@code overallScore},
     * {@code justification}, {@code similarityDetails}) must stay in sync with the
     * parsing below.
     *
     * @param nameA display name of document A
     * @param textA extracted text of document A
     * @param nameB display name of document B
     * @param textB extracted text of document B
     * @return the plagiarism report for this pair
     * @throws RuntimeException if the AI call or response parsing fails (cause preserved)
     */
    private PlagiarismReportDto analyzePair(String nameA, String textA, String nameB, String textB) {
        String prompt = "你是一个顶级的、模仿中国知网（CNKI）查重引擎的AI分析器。你的任务是逐句比对下面两篇文稿，找出并报告所有相似的句子。\n\n" +
                "【重要指令】: 请忽略那些因共同主题（例如《我的梦想》）而必然相似的、非常简短或模板化的句子。例如，忽略像‘每个人都有梦想’这样的引言。只关注那些表达具体观点、描述或独特措辞的、有实质性内容的相似句。\n\n" +
                "--- 文稿A ---\n" + textA + "\n\n" +
                "--- 文稿B ---\n" + textB + "\n\n" +
                "--- 你的分析报告 ---\n" +
                "请返回一个严格的JSON对象，包含以下三个字段：\n" +
                "1. `overallScore`: 一个0.0到1.0之间的小数，代表你对这两篇文稿整体的抄袭风险评估。\n" +
                "2. `justification`: 一段简短的、总结性的文字，作为总体的判决依据，解释你为什么给出这个分数。\n" +
                "3. `similarityDetails`: 一个JSON数组，其中每个元素都是一个对象，代表一对你认为有抄袭嫌疑的相似句。每个对象都应包含：`sentenceFromA` (来自文稿A的句子), `sentenceFromB` (在文稿B中找到的对应相似句), `similarity` (这对句子的相似度，0.0到1.0)。如果未发现任何有抄袭嫌疑的句子，`similarityDetails` 必须是一个空数组 `[]`。";

        try {
            String rawResponse = aiService.generateContent(List.of(Map.of("role", "user", "parts", List.of(Map.of("text", prompt)))), null, null);
            JsonNode rootNode = objectMapper.readTree(rawResponse);
            // The response nests the generated text under candidates[0].content.parts[0].text;
            // path(...) never throws on missing nodes, so a malformed response yields "".
            String textContent = rootNode.path("candidates").path(0).path("content").path("parts").path(0).path("text").asText();
            String cleanJson = extractJsonFromText(textContent);
            JsonNode analysisNode = objectMapper.readTree(cleanJson);

            double score = analysisNode.path("overallScore").asDouble(0.0);
            String justification = analysisNode.path("justification").asText("AI未能提供总结性的判决依据。");
            List<SimilarityDetailDto> details = Collections.emptyList();
            if (analysisNode.has("similarityDetails")) {
                details = objectMapper.convertValue(analysisNode.path("similarityDetails"), new TypeReference<>() {});
            }

            return new PlagiarismReportDto(nameA, nameB, score, justification, details);
        } catch (Exception e) {
            log.error("调用AI进行抄袭分析时出错", e);
            throw new RuntimeException("AI分析服务暂时无法连接，请稍后重试。", e);
        }
    }

    /**
     * Extracts the first top-level JSON object embedded in free-form model output
     * (e.g. surrounded by markdown fences or commentary) by slicing from the first
     * '{' to the last '}'.
     *
     * @param text raw model output, possibly containing non-JSON text around the object
     * @return the candidate JSON substring, or {@code "{}"} if no brace pair is found
     */
    private String extractJsonFromText(String text) {
        String trimmedText = text.trim();
        int firstBrace = trimmedText.indexOf('{');
        int lastBrace = trimmedText.lastIndexOf('}');
        if (firstBrace != -1 && lastBrace != -1 && lastBrace > firstBrace) {
            return trimmedText.substring(firstBrace, lastBrace + 1);
        }
        return "{}";
    }
}