package com.dayouzi.crawler_monitor.service.impl;

import com.dayouzi.crawler_monitor.base.exception.BusinessException;
import com.dayouzi.crawler_monitor.base.result.R;
import com.dayouzi.crawler_monitor.base.result.ResponseEnum;
import com.dayouzi.crawler_monitor.config.DataSource;
import com.dayouzi.crawler_monitor.config.DataSourceContextHolder;
import com.dayouzi.crawler_monitor.config.esConfig.ElasticsearchClientProvider;
import com.dayouzi.crawler_monitor.mapper.CrawlerDetailMapper;
import com.dayouzi.crawler_monitor.mapper.CrawlerMapper;
import com.dayouzi.crawler_monitor.pojo.entity.FileJson2;
import com.dayouzi.crawler_monitor.pojo.page.PageBean;
import com.dayouzi.crawler_monitor.pojo.query.*;
import com.dayouzi.crawler_monitor.pojo.vo.CrawlerDataListVO;
import com.dayouzi.crawler_monitor.pojo.vo.CrawlerDataQueryVO;
import com.dayouzi.crawler_monitor.pojo.vo.CrawlerDataVO;
import com.dayouzi.crawler_monitor.service.CrawlerDetailService;
import com.dayouzi.crawler_monitor.service.CrawlerPolicyService;
import com.dayouzi.crawler_monitor.service.CrawlerService;
import com.dayouzi.crawler_monitor.utils.DateUtils;
import com.dayouzi.crawler_monitor.utils.GoFastUtils;
import com.dayouzi.crawler_monitor.utils.HumpToUnderLineUtil;
import com.dayouzi.crawler_monitor.utils.UrlUtils;
import com.google.gson.Gson;
import lombok.extern.slf4j.Slf4j;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.RangeQueryBuilder;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.config.ConfigurableBeanFactory;
import org.springframework.stereotype.Repository;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.StringUtils;
import org.springframework.web.multipart.MultipartFile;


import javax.annotation.Resource;
import java.io.IOException;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

@Service
@Slf4j
@DataSource(value = "suidaobig")
public class CrawlerDetailServiceImpl implements CrawlerDetailService {

    /**
     * Matches the upload timestamp embedded in a go-fastdfs download URL:
     * {@code default/yyyyMMdd/H/m/s/}. Compiled once instead of per URL.
     */
    private static final Pattern FILE_TIME_PATTERN =
            Pattern.compile("default/(\\d{4})(\\d{2})(\\d{2})/(\\d{1,2})/(\\d{1,2})/(\\d{1,2})/");

    /** Output format for file upload times; DateTimeFormatter is thread-safe. */
    private static final DateTimeFormatter FILE_TIME_FORMATTER =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    @Autowired
    private CrawlerDetailMapper crawlerDetailMapper;

    @Autowired
    private CrawlerMapper crawlerMapper;

    @Autowired
    private CrawlerService crawlerService;

    @Autowired
    private CrawlerPolicyService crawlerPolicyService;

    private final RestHighLevelClient esClient;

    private Gson gson = new Gson();

    @Autowired
    public CrawlerDetailServiceImpl(RestHighLevelClient esClient) {
        this.esClient = esClient;
    }

    /**
     * Loads one crawled data row plus its attachment list and formats its add_time.
     *
     * @param crawlerDataQuery carries the data id and source table name
     * @return the populated VO, or an empty {@link CrawlerDataVO} when no row matches
     */
    @Override
    public CrawlerDataVO getCrawlerDataById(CrawlerDataQuery crawlerDataQuery) {
        CrawlerDataVO crawlerData = crawlerDetailMapper.getCrawlerDataById(crawlerDataQuery);
        // No row found: return an empty VO instead of null so callers need no null check.
        if (crawlerData == null) {
            return new CrawlerDataVO();
        }

        // Attachment URLs live on the crawler-web datasource. Restore the datasource in a
        // finally block so an exception cannot leave the thread bound to the wrong one.
        ArrayList<String> fileUrls;
        DataSourceContextHolder.setDataSourceType("onlyforcrawlerweb");
        try {
            fileUrls = crawlerMapper.getFileList(crawlerDataQuery.getDataId(), crawlerDataQuery.getTableName());
        } finally {
            DataSourceContextHolder.clearDataSourceType();
        }

        ArrayList<FileJson2> fileList = new ArrayList<>();
        for (String fileUrl : fileUrls) {
            // NOTE(review): an empty URL aborts the whole loop (original behavior kept);
            // presumably an empty entry marks the end of the list — confirm, otherwise
            // `continue` would be the right choice.
            if (fileUrl.isEmpty()) {
                break;
            }
            fileList.add(buildFileJson(fileUrl));
        }
        crawlerData.setFileList(fileList);

        // Convert the stored add_time timestamp into a human-readable string.
        String pubtime = crawlerData.getAddTime();
        pubtime = DateUtils.formatByHour(DateUtils.timestampToLocal(pubtime));
        crawlerData.setAddTime(pubtime);
        return crawlerData;
    }

    /**
     * Commits an edited data row back to its source table(s) and marks the item reviewed.
     * Spider type 0 (bid data) is a single-table update; type 5 (policy data) updates the
     * main table first and the detail table second, per agreement with the data team.
     *
     * @throws BusinessException when any of the updates did not affect exactly one row
     */
    @Override
    public void updateCommit(CrawlerDataUpdateVO crawlerDataUpdateVO) {
        int result = 0;
        if (crawlerDataUpdateVO.getSpiderType() == 0) {
            result = crawlerDetailMapper.updateCommit(crawlerDataUpdateVO);
        } else if (crawlerDataUpdateVO.getSpiderType() == 5) {
            // Rewrite relative hrefs inside the body to absolute URLs based on the source page.
            String outurl = crawlerDataUpdateVO.getOuturl();
            String info = crawlerDataUpdateVO.getInfo();
            crawlerDataUpdateVO.setInfo(UrlUtils.completeHref(info, outurl));

            // Flatten the tag list into the single "、"-separated tag column.
            crawlerDataUpdateVO.setTag(String.join("、", crawlerDataUpdateVO.getTags()));

            // Main table first, then detail table — ordering agreed with the data side.
            crawlerDataUpdateVO.setTableName("stang_zhengce");
            result = crawlerPolicyService.updateCommit(crawlerDataUpdateVO);
            crawlerDataUpdateVO.setTableName("stang_zhengce_info");
            int result3 = crawlerPolicyService.updateInfoCommit(crawlerDataUpdateVO);
            // BUGFIX: the original `result == result3 ? 1 : 0` reported success when BOTH
            // updates failed (0 == 0). Require each update to have touched exactly one row.
            result = (result == 1 && result3 == 1) ? 1 : 0;
        }
        // Flip the review status on the crawler-web datasource; restore it afterwards.
        int result2;
        DataSourceContextHolder.setDataSourceType("onlyforcrawlerweb");
        try {
            result2 = crawlerService.updateStatus(crawlerDataUpdateVO.getDid(), 1);
        } finally {
            DataSourceContextHolder.clearDataSourceType();
        }
        if (result != 1 || result2 != 1) {
            throw new BusinessException("修改失败");
        }
    }

    /**
     * Updates the review status of one item.
     *
     * @throws BusinessException when the update did not affect exactly one row
     */
    @Transactional
    @Override
    public void updateStatus(Integer did, Integer status) {
        int result = crawlerService.updateStatus(did, status);
        if (result != 1) {
            throw new BusinessException("修改失败");
        }
    }

    /**
     * Uploads attachments to go-fastdfs and links the resulting URLs to the data row.
     * Bid tables get the URLs written as a JSON array plus one row per URL in
     * go_fastdfs_file_url (for OCR); the policy table merges the new attachments into
     * its existing intro JSON list.
     *
     * @throws BusinessException when the total size exceeds the per-table cap or a DB write fails
     */
    @Override
    public void uploadFile(Integer id, String tableName, MultipartFile[] files) {
        // Sum the sizes of the non-empty files; long avoids int overflow on large uploads.
        long fileSize = 0;
        for (MultipartFile file : files) {
            if (!file.isEmpty()) {
                fileSize += file.getSize();
            }
        }
        // Per-table size caps: 30MB for the bid tables, 10MB for the policy table.
        if (tableName.equals("stang_bid_new") || tableName.equals("stang_cbid")) {
            if (fileSize > 30 * 1024 * 1024) {
                throw new BusinessException("文件总大小超过了允许的最大大小30MB");
            }
        } else if (tableName.equals("stang_zhengce")) {
            if (fileSize > 10 * 1024 * 1024) {
                throw new BusinessException("文件总大小超过了允许的最大大小10MB");
            }
        }

        // Upload every file and collect the download URLs.
        ArrayList<String> fileList = new ArrayList<>();
        ArrayList<FileJson2> fileJson2List = new ArrayList<>();
        for (MultipartFile file : files) {
            String intro = GoFastUtils.upload(file);
            fileList.add(intro);
            fileJson2List.add(new FileJson2(file.getOriginalFilename(), intro));
        }

        if (!fileList.isEmpty()) {
            if (tableName.equals("stang_bid_new") || tableName.equals("stang_cbid")) {
                // Store the URL array on the data row itself...
                int result = crawlerDetailMapper.updateIntro(id, tableName, gson.toJson(fileList));
                // ...and register each URL in go_fastdfs_file_url so OCR can pick it up.
                int result2 = 0;
                DataSourceContextHolder.setDataSourceType("onlyforcrawlerweb");
                try {
                    for (String fileUrl : fileList) {
                        result2 += crawlerMapper.insertPdfUrl(id, tableName, fileUrl);
                    }
                } finally {
                    DataSourceContextHolder.clearDataSourceType();
                }
                if (result != 1 || result2 != fileList.size()) {
                    throw new BusinessException(ResponseEnum.UPLOAD_ERROR);
                }
            } else if (tableName.equals("stang_zhengce")) {
                int result;
                DataSourceContextHolder.setDataSourceType("onlyforcrawlerweb");
                try {
                    // Merge the new attachments into the row's existing intro JSON list.
                    String oldIntro = crawlerMapper.getOldIntro(id, tableName);
                    List<FileJson2> newFileJson2 = new ArrayList<>();
                    if (oldIntro != null && !oldIntro.isEmpty()) {
                        newFileJson2.addAll(Arrays.asList(gson.fromJson(oldIntro, FileJson2[].class)));
                    }
                    newFileJson2.addAll(fileJson2List);
                    result = crawlerMapper.updateIntro(id, tableName, gson.toJson(newFileJson2));
                } finally {
                    DataSourceContextHolder.clearDataSourceType();
                }
                if (result != 1) {
                    throw new BusinessException(ResponseEnum.UPLOAD_ERROR);
                }
            }
        }
    }

    /**
     * Replaces the info (body) column of one data row.
     *
     * @throws BusinessException when the update did not affect exactly one row
     */
    @Override
    public void updateInfo(Integer id, String tableName, String info) {
        int result = crawlerDetailMapper.updateInfo(id, tableName, info);
        if (result != 1) {
            throw new BusinessException("修改失败");
        }
    }

    /**
     * Inserts a new bid-data row (spider type 0) stamped with the current epoch second.
     *
     * @return identifiers of the new row for the caller to navigate to
     * @throws BusinessException when the insert did not create exactly one row
     */
    @Override
    public CrawlerDataQueryVO addData(AddDataQuery addDataQuery) {
        CrawlerDataQueryVO crawlerDataQueryVO = new CrawlerDataQueryVO();
        long addTime = System.currentTimeMillis() / 1000; // epoch seconds
        addDataQuery.setAddTime(addTime);
        int result = 0;
        if (addDataQuery.getSpiderType() == 0) { // 招中标爬虫
            result = crawlerDetailMapper.addData(addDataQuery);
        }
        if (result != 1) {
            throw new BusinessException("新增失败");
        }
        crawlerDataQueryVO.setDataId(addDataQuery.getId());
        crawlerDataQueryVO.setSpiderType(addDataQuery.getSpiderType());
        crawlerDataQueryVO.setTableName(addDataQuery.getTableName());
        return crawlerDataQueryVO;
    }

    /**
     * Inserts a new policy-data row (spider type 5) stamped with the current epoch second.
     * The tag list is flattened to a single "、"-separated string before insertion.
     *
     * @return identifiers of the new row for the caller to navigate to
     * @throws BusinessException when the insert did not create exactly one row
     */
    @Override
    public CrawlerDataQueryVO addDataPolicy(AddDataPolicyQuery addDataPolicyQuery) {
        CrawlerDataQueryVO crawlerDataQueryVO = new CrawlerDataQueryVO();
        long addTime = System.currentTimeMillis() / 1000; // epoch seconds
        addDataPolicyQuery.setAddTime(addTime);
        int result = 0;
        if (addDataPolicyQuery.getSpiderType() == 5) { // 政策爬虫
            addDataPolicyQuery.setTag(String.join("、", addDataPolicyQuery.getTags()));
            result = crawlerPolicyService.addPolicyData(addDataPolicyQuery);
        }
        if (result != 1) {
            throw new BusinessException("新增失败");
        }
        crawlerDataQueryVO.setDataId(addDataPolicyQuery.getId());
        crawlerDataQueryVO.setSpiderType(addDataPolicyQuery.getSpiderType());
        crawlerDataQueryVO.setTableName(addDataPolicyQuery.getTableName());
        return crawlerDataQueryVO;
    }

    /**
     * Pages crawled data: ids come from Elasticsearch (which holds the filterable
     * fields), the full rows are then fetched from MySQL and joined in memory.
     *
     * @throws BusinessException for oversized page requests, unknown crawlers, or query failure
     */
    @Override
    public PageBean<CrawlerDataListVO> listPageCrawlerData(CrawlerDataListQuery crawlerDataListQuery) {
        // Guard against oversized requests that would overload ES and MySQL.
        if (crawlerDataListQuery.getLimit() > 100) {
            throw new BusinessException("查询条数过大，请联系管理员");
        }
        if (crawlerDataListQuery.getPage() > 100) {
            throw new BusinessException("查询页数过大，请联系管理员");
        }
        if (StringUtils.isEmpty(crawlerDataListQuery.getAuthor()) || StringUtils.isEmpty(crawlerDataListQuery.getTableName())) {
            // Nothing to query without both an author and a table name.
            return new PageBean<>();
        }

        // Verify the crawler exists on the crawler-web datasource; always restore the
        // datasource, even when verification throws.
        DataSourceContextHolder.setDataSourceType("onlyforcrawlerweb");
        try {
            if (!crawlerService.verifiedAuthor(crawlerDataListQuery.getAuthor())) {
                throw new BusinessException("爬虫不存在");
            }
        } finally {
            DataSourceContextHolder.clearDataSourceType();
        }
        DataSourceContextHolder.setDataSourceType("suidaobig");

        // Paging and filter parameters.
        Integer page = crawlerDataListQuery.getPage();
        Integer limit = crawlerDataListQuery.getLimit();
        Integer from = (page - 1) * limit; // ES offset of the first hit on this page
        String pubtimeStart = crawlerDataListQuery.getPubtimeStart();
        String pubtimeEnd = crawlerDataListQuery.getPubtimeEnd();
        String addTimeStart = crawlerDataListQuery.getAddTimeStart();
        String addTimeEnd = crawlerDataListQuery.getAddTimeEnd();

        // Build the ES query: author + table name are mandatory, time ranges optional.
        SearchRequest request = new SearchRequest("newccccltd").types("shipping");
        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
        BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
        boolQueryBuilder.must(QueryBuilders.matchPhraseQuery("author", crawlerDataListQuery.getAuthor()));
        boolQueryBuilder.must(QueryBuilders.matchPhraseQuery("table_name", crawlerDataListQuery.getTableName()));
        // Publish-time range (both bounds required to take effect).
        if (!StringUtils.isEmpty(pubtimeStart) && !StringUtils.isEmpty(pubtimeEnd)) {
            RangeQueryBuilder rangeQueryBuilder = QueryBuilders.rangeQuery("pub_time");
            rangeQueryBuilder.gte(DateUtils.toEpochMilliByDateStart(pubtimeStart));
            rangeQueryBuilder.lte(DateUtils.toEpochMilliByDateEnd(pubtimeEnd));
            boolQueryBuilder.must(rangeQueryBuilder);
        }
        // Ingest-time range: only ES's own add_time can be filtered here, since the
        // query runs against ES, not MySQL.
        if (!StringUtils.isEmpty(addTimeStart) && !StringUtils.isEmpty(addTimeEnd)) {
            RangeQueryBuilder rangeQueryBuilder = QueryBuilders.rangeQuery("add_time");
            rangeQueryBuilder.gte(DateUtils.toEpochMilliBySecond(addTimeStart));
            rangeQueryBuilder.lte(DateUtils.toEpochMilliBySecond(addTimeEnd));
            boolQueryBuilder.must(rangeQueryBuilder);
        }

        // Sorting: pub_time or add_time, one of the two. addTimeMysql also sorts by ES
        // add_time here and is re-sorted in memory by the MySQL value further below.
        Map<?, ?> orderBys = crawlerDataListQuery.getOrderBys();
        if (orderBys.get("orderColumn").equals("pubtime")) {
            if (orderBys.get("order").equals("ASC")) {
                sourceBuilder.sort("pub_time", SortOrder.ASC);
            } else if (orderBys.get("order").equals("DESC")) {
                sourceBuilder.sort("pub_time", SortOrder.DESC);
            }
        } else if (orderBys.get("orderColumn").equals("addTimeEs") || orderBys.get("orderColumn").equals("addTimeMysql")) {
            if (orderBys.get("order").equals("ASC")) {
                sourceBuilder.sort("add_time", SortOrder.ASC);
            } else if (orderBys.get("order").equals("DESC")) {
                sourceBuilder.sort("add_time", SortOrder.DESC);
            }
        }

        // Only id and add_time are needed from ES; the rest comes from MySQL.
        String[] includes = {"id", "add_time"};
        String[] excludes = {};
        sourceBuilder.fetchSource(includes, excludes);

        // Pagination.
        sourceBuilder.from(from);
        sourceBuilder.size(limit);

        sourceBuilder.query(boolQueryBuilder);
        request.source(sourceBuilder);

        PageBean<CrawlerDataListVO> result = new PageBean<>();
        ArrayList<CrawlerDataListVO> resultList = new ArrayList<>(); // joined response rows
        ArrayList<Map<String, Object>> esList = new ArrayList<>();   // raw ES hits
        ArrayList<CrawlerDataVO> msqlList = new ArrayList<>();       // MySQL rows for the hit ids
        try {
            SearchResponse response = esClient.search(request, RequestOptions.DEFAULT);
            SearchHits hits = response.getHits();
            for (SearchHit hit : hits) {
                esList.add(hit.getSourceAsMap());
            }
            ArrayList<Integer> ids = new ArrayList<>();
            esList.forEach(item -> ids.add((Integer) item.get("id")));
            if (!ids.isEmpty()) {
                msqlList = crawlerDetailMapper.getCrawlerDataByList(crawlerDataListQuery.getTableName(), ids);
            }

            // Join ES hits with their MySQL rows by id; hits without a MySQL row are dropped.
            ArrayList<CrawlerDataVO> finalMsqlList = msqlList;
            resultList = (ArrayList<CrawlerDataListVO>) esList.stream()
                    .filter(esEntity -> finalMsqlList.stream().anyMatch(msqlEntity -> msqlEntity.getDataId().equals((Integer) esEntity.get("id"))))
                    .map(esEntity -> {
                        CrawlerDataVO crawlerDataVO = finalMsqlList.stream().filter(msqlEntity1 -> msqlEntity1.getDataId().equals((Integer) esEntity.get("id"))).findFirst().orElse(new CrawlerDataVO());
                        return new CrawlerDataListVO(
                                (Integer) esEntity.get("id"),
                                crawlerDataVO.getAreaId(),
                                crawlerDataVO.getCateId(),
                                crawlerDataVO.getTitle(),
                                crawlerDataVO.getAuthor(),
                                crawlerDataVO.getPubtime(),
                                crawlerDataVO.getOuturl(),
                                crawlerDataVO.getLocation(),
                                crawlerDataVO.getIntro(),
                                crawlerDataVO.getFileList(),
                                crawlerDataVO.getInfo(),
                                Long.parseLong(crawlerDataVO.getAddTime()),
                                DateUtils.formatByHour(DateUtils.timestampToLocal(crawlerDataVO.getAddTime())),
                                DateUtils.formatByHour(DateUtils.timestampToLocal2(String.valueOf(esEntity.get("add_time"))))
                        );
                    })
                    .collect(Collectors.toList());

            // Attach the pdf download links from go_fastdfs_file_url; restore the
            // datasource even if the lookup throws.
            DataSourceContextHolder.setDataSourceType("onlyforcrawlerweb");
            try {
                resultList.forEach(entity -> entity.setFileList(getPdfUrl(entity.getDataId(), crawlerDataListQuery.getTableName())));
            } finally {
                DataSourceContextHolder.clearDataSourceType();
            }

            // Re-sort by the MySQL ingest time when requested (ES cannot sort by it).
            if (orderBys.get("orderColumn").equals("addTimeMysql")) {
                if (orderBys.get("order").equals("ASC")) {
                    resultList.sort(Comparator.comparing(CrawlerDataListVO::getAddTime));
                } else if (orderBys.get("order").equals("DESC")) {
                    resultList.sort(Comparator.comparing(CrawlerDataListVO::getAddTime).reversed());
                }
            }

            result.setList(resultList);
            result.setTotal(hits.getTotalHits());
            result.setPageNum(page);
            result.setPageSize(resultList.size());
        } catch (Exception e) {
            // Log with stack trace (was e.printStackTrace()) and surface a generic failure.
            log.error("分页查询爬虫数据失败", e);
            throw new BusinessException("查询失败");
        }

        return result;
    }

    /**
     * 根据数据id获取go_fastdfs_file_url表的pdf下载链接.
     * Caller is responsible for having the "onlyforcrawlerweb" datasource active.
     *
     * @param tableId   id of the crawled data row
     * @param tableName source table the row belongs to
     * @return attachment descriptors, possibly empty, never null
     */
    private List<FileJson2> getPdfUrl(Integer tableId, String tableName) {
        ArrayList<String> fileUrls = crawlerMapper.getFileList(tableId, tableName);
        List<FileJson2> fileList = new ArrayList<>(fileUrls.size());
        for (String fileUrl : fileUrls) {
            fileList.add(buildFileJson(fileUrl));
        }
        return fileList;
    }

    /**
     * Builds a {@link FileJson2} from a go-fastdfs download URL: file name and suffix
     * come from the path, the upload time from the URL's date segments.
     * BUGFIX: when the URL lacks the expected segments, the original code called
     * {@code LocalDateTime.of(0, 0, 0, 0, 0, 0)}, which throws DateTimeException;
     * now the file time is left empty instead. The last regex match wins, as before.
     */
    private FileJson2 buildFileJson(String fileUrl) {
        String fileName = fileUrl.substring(fileUrl.lastIndexOf("/") + 1);
        String suffix = fileUrl.substring(fileUrl.lastIndexOf(".") + 1);

        int year = 0;
        int month = 0;
        int day = 0;
        int hour = 0;
        int minute = 0;
        int second = 0;
        boolean matched = false;
        Matcher matcher = FILE_TIME_PATTERN.matcher(fileUrl);
        while (matcher.find()) {
            matched = true;
            year = Integer.parseInt(matcher.group(1));
            month = Integer.parseInt(matcher.group(2));
            day = Integer.parseInt(matcher.group(3));
            hour = Integer.parseInt(matcher.group(4));
            minute = Integer.parseInt(matcher.group(5));
            second = Integer.parseInt(matcher.group(6));
        }
        String fileTime = "";
        if (matched) {
            fileTime = LocalDateTime.of(year, month, day, hour, minute, second).format(FILE_TIME_FORMATTER);
        }
        return new FileJson2(fileName, fileUrl, suffix, fileTime);
    }
}
