package com.tianji.learning.utils;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.aliyun.green20220302.Client;
import com.aliyun.green20220302.models.TextModerationPlusRequest;
import com.aliyun.green20220302.models.TextModerationPlusResponse;
import com.aliyun.green20220302.models.TextModerationPlusResponseBody;
import com.aliyun.teaopenapi.models.Config;
import com.tianji.learning.enums.RiskLevel;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.List;

/**
 * Text content-safety moderation utility.
 *
 * <p>Wraps the Aliyun Green (content moderation) {@code TextModerationPlus} API to
 * scan user-generated text for policy violations. All public entry points fail
 * CLOSED: if the remote service returns an error or the call throws, the result
 * is reported as HIGH risk so unsafe content is never silently approved.
 *
 * <p>Thread-safety: the Aliyun {@link Client} is built once in {@link #init()}
 * and reused; this bean holds no other mutable state after startup.
 */
@Slf4j
@Component
public class TextModerationUtil {

    /** Success code shared by the HTTP status and the business response code. */
    private static final int SUCCESS_CODE = 200;

    /** Default moderation service profile used by {@link #moderateText(String)}. */
    private static final String DEFAULT_SERVICE = "comment_detection_pro";

    @Value("${aliyun.green.access-key-id}")
    private String accessKeyId;

    @Value("${aliyun.green.access-key-secret}")
    private String accessKeySecret;

    @Value("${aliyun.green.region-id:cn-shanghai}")
    private String regionId;

    @Value("${aliyun.green.endpoint:green-cip.cn-shanghai.aliyuncs.com}")
    private String endpoint;

    /** Socket read timeout in milliseconds. */
    @Value("${aliyun.green.read-timeout:6000}")
    private Integer readTimeout;

    /** Connection timeout in milliseconds. */
    @Value("${aliyun.green.connect-timeout:3000}")
    private Integer connectTimeout;

    private Client client;

    /**
     * Initializes the Aliyun content-safety client from the injected
     * configuration. Failing fast here (rather than on first use) surfaces
     * mis-configuration at application startup.
     *
     * @throws RuntimeException if the client cannot be created
     */
    @PostConstruct
    public void init() {
        try {
            Config config = new Config()
                .setAccessKeyId(accessKeyId)
                .setAccessKeySecret(accessKeySecret)
                .setRegionId(regionId)
                .setEndpoint(endpoint)
                .setReadTimeout(readTimeout)
                .setConnectTimeout(connectTimeout);

            this.client = new Client(config);
            log.info("阿里云内容安全客户端初始化成功");
        } catch (Exception e) {
            log.error("阿里云内容安全客户端初始化失败", e);
            throw new RuntimeException("内容安全服务初始化失败", e);
        }
    }

    /**
     * Moderates text using the default service profile.
     *
     * @param content text to check; blank or {@code null} is reported as safe
     * @return moderation result (never {@code null}; its lists are never {@code null})
     */
    public TextModerationResult moderateText(String content) {
        return moderateText(content, DEFAULT_SERVICE);
    }

    /**
     * Moderates text using a specific Aliyun service profile.
     *
     * @param content text to check; blank or {@code null} is reported as safe
     * @param service Aliyun moderation service profile name
     * @return moderation result; on any transport or service failure the result
     *         is marked HIGH risk so callers fail closed
     */
    public TextModerationResult moderateText(String content, String service) {
        // Start from a fully-populated "safe" result so EVERY exit path —
        // including the error branches below — has non-null list fields.
        // (Previously the error branches called getViolationReasons().add(...)
        // on a result whose lists were never initialized.)
        TextModerationResult result = newSafeResult();

        if (content == null || content.trim().isEmpty()) {
            return result;
        }

        try {
            JSONObject serviceParameters = new JSONObject();
            serviceParameters.put("content", content);

            TextModerationPlusRequest request = new TextModerationPlusRequest()
                .setService(service)
                .setServiceParameters(serviceParameters.toJSONString());

            TextModerationPlusResponse response = client.textModerationPlus(request);

            // Null-guard before unboxing: status/code are boxed values from the SDK.
            if (response.getStatusCode() != null && response.getStatusCode() == SUCCESS_CODE) {
                TextModerationPlusResponseBody responseBody = response.getBody();
                result.setRequestId(responseBody.getRequestId());

                if (responseBody.getCode() != null && responseBody.getCode() == SUCCESS_CODE) {
                    return parseModerationResult(responseBody.getData(), result);
                }
                log.warn("文本检测服务返回错误: code={}, message={}",
                        responseBody.getCode(), responseBody.getMessage());
                // Service-side error: report HIGH risk to stay safe.
                markHighRisk(result, "检测服务异常");
            } else {
                log.error("文本检测HTTP请求失败: statusCode={}", response.getStatusCode());
                markHighRisk(result, "检测服务不可用");
            }
        } catch (Exception e) {
            log.error("文本检测过程发生异常", e);
            markHighRisk(result, "检测过程异常");
        }

        return result;
    }

    /**
     * Parses the raw moderation payload into the result object.
     *
     * <p>Flattens per-label items into violation reasons, risk labels and
     * de-duplicated risk words, and records the highest confidence seen.
     * Any non-"none" risk level counts as a violation.
     */
    private TextModerationResult parseModerationResult(
            TextModerationPlusResponseBody.TextModerationPlusResponseBodyData data,
            TextModerationResult result) {

        result.setRawData(JSON.toJSONString(data));
        result.setRiskLevel(data.getRiskLevel());

        // Anything other than an explicit "none" level is treated as a violation.
        result.setViolated(!RiskLevel.NONE.getCode().equals(data.getRiskLevel()));

        List<String> violationReasons = new ArrayList<>();
        List<String> riskLabels = new ArrayList<>();
        List<String> riskWords = new ArrayList<>();
        double maxConfidence = 0.0; // primitive: avoids autoboxing on every loop comparison

        if (data.getResult() != null) {
            for (TextModerationPlusResponseBody.TextModerationPlusResponseBodyDataResult item : data.getResult()) {
                if (item.getDescription() != null && !"未检测出风险".equals(item.getDescription())) {
                    violationReasons.add(item.getDescription());
                }
                if (item.getLabel() != null && !"nonLabel".equals(item.getLabel())) {
                    riskLabels.add(item.getLabel());
                }
                if (item.getRiskWords() != null) {
                    // The service returns a comma-separated word list;
                    // de-duplicate while preserving first-seen order.
                    for (String word : item.getRiskWords().split(",")) {
                        String trimmed = word.trim();
                        if (!trimmed.isEmpty() && !riskWords.contains(trimmed)) {
                            riskWords.add(trimmed);
                        }
                    }
                }
                if (item.getConfidence() != null && item.getConfidence() > maxConfidence) {
                    maxConfidence = item.getConfidence();
                }
            }
        }

        result.setViolationReasons(violationReasons);
        result.setRiskLabels(riskLabels);
        result.setRiskWords(riskWords);
        // Confidence is only meaningful when at least one item reported one.
        result.setConfidence(maxConfidence > 0 ? Double.valueOf(maxConfidence) : null);

        return result;
    }

    /**
     * Quick check for whether the text violates content policy.
     *
     * @param content text to check
     * @return {@code true} if violated, {@code false} if safe
     */
    public boolean isTextViolated(String content) {
        return moderateText(content).isViolated();
    }

    /**
     * Returns the risk level of the text as an enum.
     *
     * @param content text to check
     * @return risk level enum
     */
    public RiskLevel getRiskLevel(String content) {
        return RiskLevel.fromCode(moderateText(content).getRiskLevel());
    }

    /**
     * Produces a short human-readable summary of the moderation result.
     *
     * @param content text to check
     * @return summary description
     */
    public String getModerationSummary(String content) {
        TextModerationResult result = moderateText(content);

        if (!result.isViolated()) {
            return "文本内容安全，无违规内容";
        }

        String levelDesc = RiskLevel.fromCode(result.getRiskLevel()).getDescription();
        String reasons = String.join("、", result.getViolationReasons());
        String words = result.getRiskWords().isEmpty() ?
            "" : "，涉及词汇：" + String.join("、", result.getRiskWords());

        return String.format("文本存在%s风险，原因：%s%s", levelDesc, reasons, words);
    }

    // ------------------------------------------------------------- helpers

    /** Creates a result pre-filled as "no risk" with empty (non-null) lists. */
    private TextModerationResult newSafeResult() {
        TextModerationResult result = new TextModerationResult();
        result.setRiskLevel(RiskLevel.NONE.getCode());
        result.setViolated(false);
        result.setViolationReasons(new ArrayList<>());
        result.setRiskLabels(new ArrayList<>());
        result.setRiskWords(new ArrayList<>());
        return result;
    }

    /** Marks {@code result} as a HIGH-risk violation with the given reason. */
    private void markHighRisk(TextModerationResult result, String reason) {
        result.setRiskLevel(RiskLevel.HIGH.getCode());
        result.setViolated(true);
        result.getViolationReasons().add(reason);
    }
}