package com.heima.wemedia.service.impl;

import com.alibaba.fastjson.JSON;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import com.heima.apis.article.ArticleClient;
import com.heima.common.aliyun.GreenImageScan;
import com.heima.common.aliyun.GreenTextScan;
import com.heima.common.constants.ArticleConstants;
import com.heima.common.constants.WemediaConstants;
import com.heima.common.exception.CustomException;
import com.heima.common.tess4j.Tess4jClient;
import com.heima.file.service.FileStorageService;
import com.heima.model.article.dtos.ArticleDto;
import com.heima.model.common.dtos.ResponseResult;
import com.heima.model.common.enums.AppHttpCodeEnum;
import com.heima.model.search.vos.SearchArticleVo;
import com.heima.model.wemedia.pojos.WmNews;
import com.heima.model.wemedia.pojos.WmSensitive;
import com.heima.model.wemedia.pojos.WmUser;
import com.heima.utils.common.SensitiveWordUtil;
import com.heima.wemedia.bulider.ArticleDtoBuilder;
import com.heima.wemedia.mapper.WmChannelMapper;
import com.heima.wemedia.mapper.WmNewsMapper;
import com.heima.wemedia.mapper.WmSensitiveMapper;
import com.heima.wemedia.mapper.WmUserMapper;
import com.heima.wemedia.service.WmNewsAutoScanService;
import io.seata.spring.annotation.GlobalTransactional;
import org.apache.commons.lang.StringUtils;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;

import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.ByteArrayInputStream;
import java.util.*;
import java.util.stream.Collectors;

@Service
public class WmNewsAutoScanServiceImpl implements WmNewsAutoScanService {

    @Autowired
    private WmNewsMapper wmNewsMapper;

    @Autowired
    private GreenImageScan greenImageScan;

    @Autowired
    private GreenTextScan greenTextScan;

    @Autowired
    private FileStorageService fileStorageService;

    @Autowired
    private ArticleClient articleClient;

    @Autowired
    private WmChannelMapper wmChannelMapper;

    @Autowired
    private WmUserMapper wmUserMapper;

    @Autowired
    private WmSensitiveMapper wmSensitiveMapper;

    @Autowired
    private Tess4jClient tess4jClient;

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /**
     * Auto-review a self-media article: run sensitive-word, text and image scans,
     * publish the article to the app side on success, and notify the search
     * service via Kafka. Runs asynchronously inside a Seata global transaction.
     *
     * @param id self-media article (WmNews) id
     * @throws Exception if any scan or downstream call fails unexpectedly
     */
    @Override
    @Async
    @GlobalTransactional
    public void autoScanWmNews(Integer id) throws Exception {
        // 1. Load the article; a missing record is a hard error.
        WmNews wmNews = wmNewsMapper.selectById(id);
        if (null == wmNews) {
            throw new CustomException(AppHttpCodeEnum.DATA_NOT_EXIST);
        }

        // 2. Only articles in SUBMIT state need reviewing; anything else is a no-op.
        if (WmNews.Status.SUBMIT.getCode() != wmNews.getStatus()) {
            return;
        }

        // 3. Extract all text fragments and image URLs from the article content.
        Map<String, Object> data = handleTextAndImages(wmNews);

        // 4. Custom sensitive-word scan (DFA) over the extracted text.
        if (!handleSensitiveScan(wmNews, data)) {
            return;
        }

        // 5. Cloud text moderation; a "block"/"review" verdict updates the
        //    article status (fail=2, manual review=3) and aborts here.
        if (!handleTextScan(data.get("text").toString(), wmNews)) {
            return;
        }

        // 6. Download each image from MinIO, OCR + sensitive-word check it,
        //    then submit the batch to cloud image moderation.
        if (!handleImageScan((List<String>) data.get("imagesUrls"), wmNews)) {
            return;
        }

        // 7. Publish the approved article to the app side via the Feign client.
        ResponseResult responseResult = saveAppArticle(wmNews);
        if (!responseResult.getCode().equals(200)) {
            throw new CustomException(AppHttpCodeEnum.WM_FENTIN_FAIL);
        }

        // 8. Record the generated article id and mark the review as passed (status 9).
        wmNews.setArticleId(Long.parseLong(responseResult.getData().toString()));
        updateWmNews(wmNews, WemediaConstants.WM_SCAN_SUCCESS, "审核成功");

        // 9. Notify the search service so the article gets indexed in ES.
        notifyArticleEsSync(wmNews);
    }

    /**
     * Build the search sync payload from the approved article and publish it to
     * the ES-sync Kafka topic.
     *
     * @param wmNews approved article carrying the freshly assigned articleId
     */
    private void notifyArticleEsSync(WmNews wmNews) {
        SearchArticleVo vo = new SearchArticleVo();
        BeanUtils.copyProperties(wmNews, vo);

        // Resolve the author's display name when the user record exists.
        WmUser wmUser = wmUserMapper.selectById(wmNews.getUserId());
        if (null != wmUser) {
            vo.setAuthorName(wmUser.getName());
        }

        vo.setId(wmNews.getArticleId());
        vo.setAuthorId(wmNews.getUserId().longValue());
        vo.setLayout(wmNews.getType());
        // NOTE(review): static page URL is hard-coded — presumably a placeholder
        // until static page generation is wired in; confirm before release.
        vo.setStaticUrl("http://www.baidu.com");

        kafkaTemplate.send(ArticleConstants.ARTICLE_ES_SYNC_TOPIC, JSON.toJSONString(vo));
    }

    /**
     * Scan the extracted text against the self-managed sensitive-word list
     * using the DFA matcher. On a hit the article is marked as failed with the
     * offending words recorded in the reason.
     *
     * @param wmNews article under review (updated in place on failure)
     * @param data   extraction result; must contain a "text" entry
     * @return true when no sensitive word was found
     */
    private boolean handleSensitiveScan(WmNews wmNews, Map<String, Object> data) {
        boolean flag = true;

        // Projection query: fetch only the sensitive-word column.
        List<WmSensitive> sensitiveList = wmSensitiveMapper.selectList(
                Wrappers.<WmSensitive>lambdaQuery().select(WmSensitive::getSensitives));
        List<String> words = sensitiveList.stream()
                .map(WmSensitive::getSensitives)
                .collect(Collectors.toList());

        // (Re)build the DFA trie. NOTE(review): rebuilt on every call, including
        // once per OCR'd image — consider caching if the word list is large.
        SensitiveWordUtil.initMap(words);

        // Key = matched sensitive word, value = occurrence count.
        Map<String, Integer> hits = SensitiveWordUtil.matchWords(data.get("text").toString());
        if (null != hits && !hits.isEmpty()) {
            updateWmNews(wmNews, WemediaConstants.WM_SCAN_FAIL, "文本中含有敏感词" + hits);
            flag = false;
        }

        return flag;
    }

    /**
     * Sync the approved article to the app side through the article Feign client.
     *
     * @param wmNews approved article
     * @return the remote save result; data carries the new article id on success
     */
    private ResponseResult saveAppArticle(WmNews wmNews) {
        // Assemble the DTO (basic fields, author, channel) via the builder.
        ArticleDto dto = new ArticleDtoBuilder(wmNews, wmChannelMapper, wmUserMapper)
                .bulidAuthor()
                .bulidChannel()
                .bulidBasic()
                .bulidBeanCopy()
                .bulid();

        return articleClient.saveArticle(dto);
    }

    /**
     * Review all images: download each one, OCR its text and run the
     * sensitive-word scan on it, then submit the whole batch to cloud image
     * moderation. A "block" verdict fails the article; "review" (or any scan
     * error) routes it to manual review.
     *
     * @param imageUrls image URLs extracted from content and cover (may be null/empty)
     * @param wmNews    article under review (updated in place on failure)
     * @return true when every image passed
     * @throws Exception if downloading or OCR fails
     */
    private boolean handleImageScan(List<String> imageUrls, WmNews wmNews) throws Exception {
        boolean flag = true;

        if (null == imageUrls || imageUrls.isEmpty()) {
            return flag;
        }

        // De-duplicate so shared cover/content images are scanned once.
        imageUrls = imageUrls.stream().distinct().collect(Collectors.toList());

        List<byte[]> imageBytes = new ArrayList<>(imageUrls.size());

        for (String imageUrl : imageUrls) {
            byte[] bytes = fileStorageService.downLoadFile(imageUrl);

            // OCR the embedded text and push it through the DFA scan.
            try (ByteArrayInputStream inputStream = new ByteArrayInputStream(bytes)) {
                BufferedImage image = ImageIO.read(inputStream);
                String ocrText = tess4jClient.doOCR(image);
                Map<String, Object> data = new HashMap<>();
                data.put("text", ocrText);

                flag = handleSensitiveScan(wmNews, data);
                if (!flag) {
                    return flag;
                }
            }

            imageBytes.add(bytes);
        }

        try {
            Map map = greenImageScan.imageScan(imageBytes);
            if (null != map && !map.isEmpty()) {
                // Constant-first equals: safe even if "suggestion" is absent.
                if ("block".equals(map.get("suggestion"))) {
                    flag = false;
                    updateWmNews(wmNews, WemediaConstants.WM_SCAN_FAIL, "图片中包含不健康的内容");
                } else if ("review".equals(map.get("suggestion"))) {
                    flag = false;
                    updateWmNews(wmNews, WemediaConstants.WM_SCAN_REVIEW, "图片中包含不确定的内容需要人工审核");
                }
            }
        } catch (Exception e) {
            // Best-effort policy: a moderation outage downgrades to manual review
            // rather than blocking the pipeline.
            flag = false;
            updateWmNews(wmNews, WemediaConstants.WM_SCAN_REVIEW, "图片中包含不确定的内容需要人工审核");
        }

        return flag;
    }

    /**
     * Run the cloud text-moderation scan over the article text. A "block"
     * verdict fails the article; "review" (or any scan error) routes it to
     * manual review.
     *
     * @param content concatenated article text (body + title)
     * @param wmNews  article under review (updated in place on failure)
     * @return true when the text passed moderation
     * @throws Exception propagated only if updateWmNews itself fails
     */
    private boolean handleTextScan(String content, WmNews wmNews) throws Exception {
        boolean flag = true;

        try {
            Map map = greenTextScan.greeTextScan(content);
            if (null != map && !map.isEmpty()) {
                // Constant-first equals: safe even if "suggestion" is absent.
                if ("block".equals(map.get("suggestion"))) {
                    flag = false;
                    updateWmNews(wmNews, WemediaConstants.WM_SCAN_FAIL, "文本中含有敏感词");
                } else if ("review".equals(map.get("suggestion"))) {
                    flag = false;
                    updateWmNews(wmNews, WemediaConstants.WM_SCAN_REVIEW, "文本中含有不确定的数据需要人工审核");
                }
            }
        } catch (Exception e) {
            // Best-effort policy: a moderation outage downgrades to manual review.
            flag = false;
            updateWmNews(wmNews, WemediaConstants.WM_SCAN_REVIEW, "文本中含有不确定的数据需要人工审核");
        }

        return flag;
    }

    /**
     * Persist the article's review status and the human-readable reason.
     *
     * @param wmNews article to update (mutated in place)
     * @param status new review status code
     * @param reason review outcome description shown to the author
     */
    private void updateWmNews(WmNews wmNews, Short status, String reason) {
        wmNews.setStatus(status);
        wmNews.setReason(reason);
        wmNewsMapper.updateById(wmNews);
    }

    /**
     * Extract every text fragment and every image URL from the article.
     * Content is a JSON array of {type, value} nodes; the title is appended to
     * the text and the cover images (comma-separated) to the image list.
     *
     * @param wmNews source article
     * @return map with "text" (String) and "imagesUrls" (List&lt;String&gt;)
     */
    private Map<String, Object> handleTextAndImages(WmNews wmNews) {
        StringBuilder sb = new StringBuilder();
        List<String> imageUrls = new ArrayList<>();

        // Walk the content nodes, splitting text from images.
        List<Map> mapList = JSON.parseArray(wmNews.getContent(), Map.class);
        for (Map map : mapList) {
            if ("text".equals(map.get("type"))) {
                sb.append(map.get("value"));
            } else if ("image".equals(map.get("type"))) {
                imageUrls.add(map.get("value").toString());
            }
        }

        // The title must be scanned too.
        sb.append("-").append(wmNews.getTitle());

        // Cover images are stored as a comma-separated URL list.
        if (StringUtils.isNotEmpty(wmNews.getImages())) {
            imageUrls.addAll(Arrays.asList(wmNews.getImages().split(",")));
        }

        Map<String, Object> data = new HashMap<>();
        data.put("text", sb.toString());
        data.put("imagesUrls", imageUrls);

        return data;
    }
}
