package com.heima.wemedia.service.impl;

import com.alibaba.fastjson.JSON;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import com.heima.apis.article.IArticleClient;
import com.heima.common.aliyun.GreenImageScan;
import com.heima.common.aliyun.GreenTextScan;
import com.heima.common.exception.CustomException;
import com.heima.common.tess4j.Tess4jClient;
import com.heima.file.service.FileStorageService;
import com.heima.model.article.dtos.ArticleDto;
import com.heima.model.common.dtos.ResponseResult;
import com.heima.model.common.enums.AppHttpCodeEnum;
import com.heima.model.wemedia.pojos.WmNews;
import com.heima.model.wemedia.pojos.WmSensitive;
import com.heima.utils.common.SensitiveWordUtil;
import com.heima.wemedia.builder.ArticleDtoBuilder;
import com.heima.wemedia.mapper.WmChannelMapper;
import com.heima.wemedia.mapper.WmNewsMapper;
import com.heima.wemedia.mapper.WmSensitiveMapper;
import com.heima.wemedia.mapper.WmUserMapper;
import com.heima.wemedia.service.WmNewsAutoScanService;
import org.apache.commons.lang.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.ByteArrayInputStream;
import java.util.*;
import java.util.stream.Collectors;

@Service
/**
 * Automated review pipeline for self-media (wemedia) articles.
 *
 * <p>Flow: load article → custom sensitive-word DFA check → Aliyun text scan
 * → OCR + Aliyun image scan → sync to the app-article service via Feign →
 * persist final status. Any failed stage records a rejection/manual-review
 * status on the article and short-circuits the pipeline.
 */
@Service
public class WmNewsAutoScanServiceImpl implements WmNewsAutoScanService {
    @Autowired
    private WmNewsMapper wmNewsMapper;

    @Autowired
    private GreenTextScan greenTextScan;

    @Autowired
    private GreenImageScan greenImageScan;

    @Autowired
    private FileStorageService fileStorageService;

    @Autowired
    private IArticleClient client;

    @Autowired
    private WmUserMapper wmUserMapper;

    @Autowired
    private WmChannelMapper wmChannelMapper;

    @Autowired
    private WmSensitiveMapper wmSensitiveMapper;

    @Autowired
    private Tess4jClient tess4jClient;

    /**
     * Review a self-media article asynchronously.
     *
     * @param id id of the wemedia article to review
     * @throws CustomException if the article does not exist or the app-side
     *                         save fails
     * @throws Exception       propagated from the text/image scan clients
     */
    @Override
    @Async
    public void autoScanWmNews(Integer id) throws Exception {
        // 1. Load the article by id.
        WmNews wmNews = wmNewsMapper.selectById(id);
        // 2. Fail fast when the article is missing.
        if (null == wmNews) {
            throw new CustomException(AppHttpCodeEnum.DATA_NOT_EXIST);
        }

        // 3. Only articles in SUBMIT state go through machine review.
        if (wmNews.getStatus().equals(WmNews.Status.SUBMIT.getCode())) {
            // 3.1 Split the article content into plain text and image URLs.
            Map<String, Object> data = handleTextAndImages(wmNews);
            String textContent = data.get("textContent").toString();

            // 3.2 Custom sensitive-word check; a hit rejects the article.
            if (!handleSensitiveScan(textContent, wmNews)) {
                return;
            }

            // 3.3 Aliyun text scan; on block/review the status is updated inside.
            if (!handleTextScan(textContent, wmNews)) {
                return;
            }

            // 3.4 Download all images, OCR them for sensitive words, then send
            //     the batch to the Aliyun image scan.
            @SuppressWarnings("unchecked") // handleTextAndImages always stores a List<String> under "images"
            List<String> images = (List<String>) data.get("images");
            if (!handleImageScan(images, wmNews)) {
                return;
            }

            // 3.5 Sync the approved article to the app side via Feign.
            ResponseResult responseResult = saveAppArticle(wmNews);
            if (!responseResult.getCode().equals(200)) {
                throw new CustomException(AppHttpCodeEnum.SERVER_ERROR);
            }

            // 3.6 Record the generated app-article id and mark as published.
            wmNews.setArticleId(Long.parseLong(responseResult.getData().toString()));
            updateWmNewStatus(wmNews, (short) 9, "审核通过");
        }
    }

    /**
     * Check the text against the self-maintained sensitive-word list (DFA).
     *
     * @param textContent text to check (article body + title, or OCR output)
     * @param wmNews      article whose status is updated on a hit
     * @return {@code true} when the text is clean, {@code false} when a
     *         sensitive word was found (status already set to rejected)
     */
    private boolean handleSensitiveScan(String textContent, WmNews wmNews) {
        // 1. Load all sensitive words (projection: only the `sensitives` column).
        List<WmSensitive> sensitiveRows = wmSensitiveMapper
                .selectList(Wrappers.<WmSensitive>lambdaQuery().select(WmSensitive::getSensitives));
        List<String> words = sensitiveRows.stream()
                .map(WmSensitive::getSensitives)
                .collect(Collectors.toList());

        // 2. (Re)build the DFA lookup map.
        // NOTE(review): SensitiveWordUtil looks like static shared state; with
        // @Async this re-init may race across concurrent reviews — TODO confirm.
        SensitiveWordUtil.initMap(words);

        // 3. Match the text against the DFA.
        Map<String, Integer> hits = SensitiveWordUtil.matchWords(textContent);
        if (null != hits && hits.size() > 0) {
            updateWmNewStatus(wmNews, (short) 2, "文章中包含了自定义的敏感词" + hits);
            return false;
        }
        return true;
    }

    /**
     * Sync the approved article to the app side through the Feign client.
     *
     * @param wmNews approved article
     * @return the Feign response; {@code data} carries the new app-article id
     */
    private ResponseResult saveAppArticle(WmNews wmNews) {
        // Builder assembles the ArticleDto (bean copy, basics, author, channel).
        // Method names carry the project's original "bulid" spelling.
        ArticleDtoBuilder builder = new ArticleDtoBuilder(wmNews, wmChannelMapper, wmUserMapper);

        ArticleDto dto = builder
                .buildBeanCopy()
                .bulidBasic()
                .bulidAuth()
                .bulidChannel()
                .bulid();

        return client.saveArticle(dto);
    }

    /**
     * Review the article's images: OCR each one for custom sensitive words,
     * then submit the batch to the Aliyun image scan.
     *
     * @param images image URLs (may be null/empty → trivially passes)
     * @param wmNews article whose status is updated on failure
     * @return {@code true} when all images pass
     * @throws Exception propagated from download/OCR/scan clients
     */
    private boolean handleImageScan(List<String> images, WmNews wmNews) throws Exception {
        if (null == images || images.isEmpty()) {
            return true;
        }

        // 1. De-duplicate URLs so each image is downloaded and scanned once.
        images = images.stream().distinct().collect(Collectors.toList());

        // 2. Download each image, OCR it, and run the sensitive-word check
        //    on the recognized text.
        List<byte[]> imageBytes = new ArrayList<>(images.size());
        for (String imageUrl : images) {
            byte[] bytes = fileStorageService.downLoadFile(imageUrl);

            try (ByteArrayInputStream in = new ByteArrayInputStream(bytes)) {
                // NOTE(review): ImageIO.read returns null for undecodable bytes;
                // doOCR would then receive null — TODO confirm upstream guarantees.
                BufferedImage image = ImageIO.read(in);
                String text = tess4jClient.doOCR(image);
                if (!handleSensitiveScan(text, wmNews)) {
                    return false;
                }
            }

            imageBytes.add(bytes);
        }

        // 3. Submit the whole batch to the Aliyun image scan and map the verdict.
        Map<?, ?> response = greenImageScan.imageScan(imageBytes);
        return parseScanResponse(wmNews, true, response);
    }

    /**
     * Map an Aliyun scan verdict onto the article status.
     *
     * @param wmNews article whose status is updated on block/review
     * @param flag   incoming pass flag (returned unchanged when no verdict applies)
     * @param map    scan response; key "suggestion" is "block"/"review"/pass
     * @return {@code false} when the content was blocked or flagged for
     *         manual review, otherwise {@code flag}
     */
    private boolean parseScanResponse(WmNews wmNews, boolean flag, Map<?, ?> map) {
        if (null != map) {
            if ("block".equals(map.get("suggestion"))) {
                // Hard failure: reject the article.
                updateWmNewStatus(wmNews, (short) 2, "文章中存在违规的内容");
                flag = false;
            } else if ("review".equals(map.get("suggestion"))) {
                // Uncertain verdict: route to manual review.
                // (Fixed message typo: was "文中中存在…".)
                updateWmNewStatus(wmNews, (short) 3, "文章中存在不确定的因素需要人工进行审核");
                flag = false;
            }
        }
        return flag;
    }

    /**
     * Review the article's text content via the Aliyun text scan.
     *
     * @param textContent text to scan
     * @param wmNews      article whose status is updated on failure
     * @return {@code true} when the text passes
     * @throws Exception propagated from the scan client
     */
    private boolean handleTextScan(String textContent, WmNews wmNews) throws Exception {
        Map<?, ?> response = greenTextScan.greeTextScan(textContent);
        return parseScanResponse(wmNews, true, response);
    }

    /**
     * Persist the article's review status and reason.
     *
     * @param wmNews article to update
     * @param status new status code (2=rejected, 3=manual review, 9=published)
     * @param reason human-readable reason stored with the status
     */
    private void updateWmNewStatus(WmNews wmNews, short status, String reason) {
        wmNews.setStatus(status);
        wmNews.setReason(reason);
        wmNewsMapper.updateById(wmNews);
    }

    /**
     * Extract the plain text and image URLs from the article.
     *
     * <p>The content column is a JSON array of nodes shaped like
     * {@code {"type": "text"|"image", "value": ...}}; the title is appended to
     * the text and the cover images (comma-separated) to the image list.
     *
     * @param wmNews article to dissect
     * @return map with keys {@code "textContent"} (String) and
     *         {@code "images"} (List&lt;String&gt;)
     */
    private Map<String, Object> handleTextAndImages(WmNews wmNews) {
        // Accumulators for text and image URLs.
        StringBuilder sb = new StringBuilder();
        List<String> images = new ArrayList<>();

        if (StringUtils.isNotEmpty(wmNews.getContent())) {
            List<Map> nodes = JSON.parseArray(wmNews.getContent(), Map.class);

            for (Map node : nodes) {
                Object value = node.get("value");
                if (value == null) {
                    // Malformed node without a value: skip instead of NPE.
                    continue;
                }
                if ("image".equals(node.get("type"))) {
                    images.add(value.toString());
                } else if ("text".equals(node.get("type"))) {
                    sb.append(value);
                }
            }
        }

        // The title is reviewed together with the body text.
        sb.append(wmNews.getTitle());

        // Cover images are stored as a comma-separated URL list.
        if (StringUtils.isNotEmpty(wmNews.getImages())) {
            images.addAll(Arrays.asList(wmNews.getImages().split(",")));
        }

        Map<String, Object> result = new HashMap<>();
        result.put("textContent", sb.toString());
        result.put("images", images);
        return result;
    }
}
