package cn.echoparrot.domain.service;

import java.io.InputStream;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.HashMap;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.exception.UploadFileException;
import lombok.extern.slf4j.Slf4j;
import org.apache.pdfbox.pdfparser.PDFParser;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDPage;
import org.apache.pdfbox.pdmodel.PDResources;
import org.apache.pdfbox.pdmodel.graphics.image.PDImageXObject;
import org.apache.pdfbox.cos.COSName;
import org.springframework.ai.document.Document;
import org.springframework.ai.document.DocumentReader;

import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Base64;
import java.util.ArrayList;
import java.util.List;

@Slf4j
public class QwenOcrReader implements DocumentReader {

    /** OCR model identifier shared by the single-image and PDF flows. */
    private static final String OCR_MODEL = "qwen-vl-ocr-latest";
    /** Upper pixel threshold: larger images are scaled down proportionally by the service. */
    private static final String MAX_PIXELS = "6422528";
    /** Lower pixel threshold: smaller images are scaled up proportionally by the service. */
    private static final String MIN_PIXELS = "3136";

    /** Image or PDF file to be OCR'ed. */
    private final Path path;
    /** DashScope (百炼) API key supplied by the caller; used for every OCR request. */
    private final String apiKey;

    /**
     * @param path            image file or PDF to read
     * @param dashScopeApiKey DashScope API key used for all calls
     */
    public QwenOcrReader(Path path, String dashScopeApiKey) {
        this.path = path;
        this.apiKey = dashScopeApiKey;
    }

    /**
     * Runs OCR on the configured file.
     * A PDF has its embedded images extracted and sent in one batched request;
     * any other file is treated as a single image.
     *
     * @return one {@link Document} per recognized text chunk; empty list when the
     *         API key is missing, the upload fails, or a PDF contains no images
     */
    @Override
    public List<Document> get() {
        try {
            // Case-insensitive extension check so "FOO.PDF" is also recognized.
            if (path.toString().toLowerCase().endsWith(".pdf")) {
                List<Map<String, Object>> images = extractImagePdf(path);
                if (images.isEmpty()) {
                    // Nothing to OCR — avoid an invalid empty-content API call.
                    return Collections.emptyList();
                }
                return callOcr(images);
            }
            return List.of(new Document(
                    simpleMultiModalConversationCall(path.toAbsolutePath().toString())));
        } catch (NoApiKeyException e) {
            log.error("请配置DASHSCOPE_API_KEY环境变量", e);
            return Collections.emptyList();
        } catch (UploadFileException e) {
            log.error("请检查文件路径是否正确", e);
            return Collections.emptyList();
        }
    }

    /**
     * OCRs a single local image file via the {@value #OCR_MODEL} model.
     *
     * @param localPath absolute local path of the image
     * @return the text recognized in the image
     * @throws ApiException        on DashScope API errors
     * @throws NoApiKeyException   when no API key is available
     * @throws UploadFileException when the local file cannot be uploaded
     */
    public String simpleMultiModalConversationCall(String localPath)
            throws ApiException, NoApiKeyException, UploadFileException {
        String fileUri = toFileUri(localPath);

        Map<String, Object> imagePart = new HashMap<>();
        imagePart.put("image", fileUri);
        // Images above MAX_PIXELS are scaled down (and below MIN_PIXELS scaled up)
        // proportionally by the service before recognition.
        imagePart.put("max_pixels", MAX_PIXELS);
        imagePart.put("min_pixels", MIN_PIXELS);
        // Automatic rotation correction is DISABLED (the original comment claimed
        // the opposite; the value has always been false).
        imagePart.put("enable_rotate", false);

        // qwen-vl-ocr-latest falls back to its built-in default prompt when no
        // "text" part is supplied, which is the behavior we want here.
        MultiModalMessage userMessage = MultiModalMessage.builder()
                .role(Role.USER.getValue())
                .content(Arrays.asList(imagePart))
                .build();

        MultiModalConversation conv = new MultiModalConversation();
        MultiModalConversationResult result = conv.call(buildParam(userMessage));
        String text = result.getOutput().getChoices().get(0)
                .getMessage().getContent().get(0).get("text").toString();
        // Use the logger instead of System.out so output honors the log config.
        log.debug("OCR result: {}", text);
        return text;
    }

    /**
     * Converts a local filesystem path to a {@code file://} URI the DashScope SDK
     * accepts. Windows drive letters ("D:") need the triple-slash form to avoid
     * an "authority component" error from {@link URI}.
     */
    private static String toFileUri(String localPath) {
        String normalized = localPath.replace("\\", "/");
        if (localPath.contains(":")) {
            // Path containing a drive letter: use file:///D:/... directly.
            return "file:///" + normalized;
        }
        try {
            return new URI("file", "", normalized, null).toString();
        } catch (Exception e) {
            // Fall back to the simple form if URI construction fails.
            log.warn("URI转换失败，使用原始路径: {}", e.getMessage());
            return "file:///" + normalized;
        }
    }

    /**
     * Extracts every embedded image from the PDF and packages each one as a
     * DashScope image part (base64 data URL plus pixel thresholds).
     *
     * @param path PDF file to scan
     * @return image parts in page order; empty list on I/O failure
     */
    public List<Map<String,Object>> extractImagePdf(Path path) {
        List<Map<String, Object>> imageList = new ArrayList<>();

        // PDDocument is Closeable — close it to release parser resources
        // (the previous version leaked it).
        try (InputStream inputStream = Files.newInputStream(path);
             PDDocument document = new PDFParser(
                     new org.apache.pdfbox.io.RandomAccessReadBuffer(inputStream)).parse()) {

            int pageCount = document.getNumberOfPages();
            log.info("PDF共有 {} 页", pageCount);

            for (int i = 0; i < pageCount; i++) {
                PDResources resources = document.getPage(i).getResources();

                // Walk every XObject on the page and keep only images.
                for (COSName name : resources.getXObjectNames()) {
                    if (!resources.isImageXObject(name)) {
                        continue;
                    }
                    PDImageXObject image = (PDImageXObject) resources.getXObject(name);
                    BufferedImage bImage = image.getImage();

                    // Re-encode; fall back to PNG for uncommon source formats.
                    String formatName = image.getSuffix();
                    if (!"png".equalsIgnoreCase(formatName)
                            && !"jpeg".equalsIgnoreCase(formatName)
                            && !"jpg".equalsIgnoreCase(formatName)) {
                        formatName = "png";
                    }
                    ByteArrayOutputStream baos = new ByteArrayOutputStream();
                    ImageIO.write(bImage, formatName, baos);
                    String base64Image = Base64.getEncoder().encodeToString(baos.toByteArray());

                    // Shape required by the DashScope multimodal content API.
                    Map<String, Object> imageInfo = new HashMap<>();
                    imageInfo.put("type", "image");
                    imageInfo.put("min_pixels", MIN_PIXELS);
                    imageInfo.put("max_pixels", MAX_PIXELS);
                    imageInfo.put("image", "data:image/" + formatName + ";base64," + base64Image);
                    imageList.add(imageInfo);

                    log.info("已提取图像: {}, 格式: {}", name.getName(), formatName);
                }
            }

            log.info("图像提取完成，共提取 {} 张图像", imageList.size());

        } catch (IOException e) {
            log.error("提取PDF图像时发生错误", e);
        }
        return imageList;
    }

    /**
     * Sends the extracted PDF image parts to the OCR model in one request.
     * Uses the constructor-supplied API key (previously this re-read the
     * DASHSCOPE_API_KEY environment variable, silently ignoring the injected key).
     *
     * @param maps image content parts produced by {@link #extractImagePdf(Path)}
     * @return one {@link Document} per text content part in the model response
     */
    private List<Document> callOcr(List<Map<String,Object>> maps) throws NoApiKeyException, UploadFileException {
        MultiModalMessage userMessage = MultiModalMessage.builder()
                .role(Role.USER.getValue())
                .content(maps)
                .build();
        MultiModalConversation conv = new MultiModalConversation();
        MultiModalConversationResult result = conv.call(buildParam(userMessage));
        return result.getOutput().getChoices().get(0).getMessage().getContent().stream()
                .map(content -> content.get("text").toString())
                .map(Document::new)
                .toList();
    }

    /**
     * Builds the conversation parameters shared by both call sites
     * (model, injected API key, near-deterministic sampling settings).
     * Deliberately does NOT log the API key — it is a credential.
     */
    private MultiModalConversationParam buildParam(MultiModalMessage userMessage) {
        return MultiModalConversationParam.builder()
                .apiKey(apiKey)
                .model(OCR_MODEL)
                .message(userMessage)
                .topP(0.001)
                .temperature(0.1f)
                .maxLength(8192)
                .build();
    }
}