package com.eshore.textanalyzer.elk.service.impl;

import com.eshore.textanalyzer.elk.common.ESRestClient;
import com.eshore.textanalyzer.elk.common.EsState;
import com.eshore.textanalyzer.elk.common.HpEsConnect;
import com.eshore.textanalyzer.elk.common.ResponseMessage;
import com.eshore.textanalyzer.elk.dao.TagHotwordsDao;
import com.eshore.textanalyzer.elk.dao.TagRxOrderDao;
import com.eshore.textanalyzer.elk.pojo.Paper;
import com.eshore.textanalyzer.elk.pojo.TagEsState;
import com.eshore.textanalyzer.elk.pojo.TagHotwords;
import com.eshore.textanalyzer.elk.pojo.ToEsData;
import com.eshore.textanalyzer.elk.service.ElkService;
import com.eshore.textanalyzer.elk.service.HotwordsService;
import com.eshore.textanalyzer.elk.service.TagEsStateService;
import com.eshore.textanalyzer.elk.utils.BulkProcessorUtil;
import com.eshore.textanalyzer.elk.utils.DBHepler;
import lombok.extern.slf4j.Slf4j;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.core.CountRequest;
import org.elasticsearch.client.core.CountResponse;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.index.reindex.DeleteByQueryRequest;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
import org.springframework.util.ObjectUtils;

import java.io.IOException;
import java.sql.*;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.Date;
import java.util.concurrent.TimeUnit;

/**
 * @author xyd
 * @date 2021/5/17
 * 实现 elk操作接口
 * 这里是针对 hp_big_data 数据开发的接口
 * 与ElkServiceImpl不同的是不配置动态处理
 * hpdata 数据操作
 */
@Slf4j
@Qualifier
@Service("HpElkServiceImpl")
public class HpElkServiceImpl implements ElkService, HotwordsService {

    /** Rows between progress log messages while streaming MySQL data to ES. */
    private static final int PROGRESS_LOG_INTERVAL = 100_000;

    /** Expected column count per row; used to presize the per-row map. */
    private static final int EXPECTED_COLUMN_COUNT = 120;

    /** Max terms returned per hot-word aggregation field. */
    private static final int HOT_WORDS_AGG_SIZE = 3000;

    // hpdata order DAO (currently unused here; kept for wiring compatibility)
    @Autowired
    private TagRxOrderDao tagRxOrderDao;

    // hot-word table DAO
    @Autowired
    private TagHotwordsDao tagHotwordsDao;

    // import-job state tracking service
    @Autowired
    private TagEsStateService tagEsStateService;

    // ES client wrapper configured with credentials
    @Autowired
    private ESRestClient restClient;

    /**
     * Search by text and return highlighted values.
     * Not implemented for the hpdata index; always returns {@code null}.
     *
     * @param page  page number (ignored)
     * @param size  page size (ignored)
     * @param texts query text (ignored)
     * @return always {@code null}
     * @throws IOException never thrown by this stub
     */
    @Override
    public Paper<Map<String, String>> getEsDataList(int page, int size, String texts) throws IOException {
        return null;
    }

    /**
     * Full-text search against the hpdata index across three fields
     * (act_tag_item_values, thing_subject, title — OR semantics), returning the
     * raw paged documents plus the total hit count for the same query.
     *
     * @param page  1-based page number; values below 1 fall back to 1
     * @param size  page size; values below 1 fall back to 10
     * @param texts query text matched against all three fields
     * @return success message wrapping a {@link Paper} of source maps, or an
     *         error message when the ES round trip fails
     */
    @Override
    public ResponseMessage getEsDataListHp(int page, int size, String texts) {
        // FIX: ObjectUtils.isEmpty(int) is never true for a primitive (it is
        // autoboxed to a non-null Integer), so the old defaults never applied.
        if (page < 1) {
            page = 1;
        }
        if (size < 1) {
            size = 10;
        }
        // offset = first document of the requested page
        int offset = (page - 1) * size;
        RestHighLevelClient client = restClient.getClient();

        // OR-combine the three text fields: a document matches if any field matches
        BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
        boolQueryBuilder.should(QueryBuilders.matchQuery("act_tag_item_values", texts));
        boolQueryBuilder.should(QueryBuilders.matchQuery("thing_subject", texts));
        boolQueryBuilder.should(QueryBuilders.matchQuery("title", texts));

        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
        sourceBuilder.from(offset);
        sourceBuilder.size(size);
        sourceBuilder.query(boolQueryBuilder);
        // FIX: use the logger instead of System.out
        log.info("检索参数：{}", sourceBuilder);

        SearchRequest request = new SearchRequest();
        request.indices(HpEsConnect.INDEX_NAME);
        request.source(sourceBuilder);
        try {
            SearchResponse response = client.search(request, RequestOptions.DEFAULT);
            SearchHit[] hits = response.getHits().getHits();

            // FIX: the count request previously reused the paged search source while
            // a separately built single-field query was left dead. Count now uses the
            // SAME bool query as the search so the reported total matches the page data.
            CountRequest countRequest = new CountRequest(HpEsConnect.INDEX_NAME);
            countRequest.source(new SearchSourceBuilder().query(boolQueryBuilder));
            CountResponse countResponse = client.count(countRequest, RequestOptions.DEFAULT);
            long count = countResponse.getCount();

            // collect the raw _source of each hit
            List<Map<String, Object>> list = new ArrayList<>(hits.length);
            for (SearchHit hit : hits) {
                list.add(hit.getSourceAsMap());
            }

            // wrap into the paging envelope
            Paper<Map<String, Object>> listPaper = new Paper<Map<String, Object>>();
            listPaper.setData(list);
            listPaper.setCount(count);
            listPaper.setPage(page);
            listPaper.setSize(size);
            return ResponseMessage.createBySuccess(listPaper);
        } catch (IOException e) {
            // FIX: log with the logger instead of printStackTrace()
            log.error("ES search failed for index " + HpEsConnect.INDEX_NAME, e);
            return ResponseMessage.createByError("数据查询失败");
        }
    }

    /**
     * Look up a document by its _id.
     * Not implemented for the hpdata index; always returns {@code null}.
     *
     * @param id document _id (ignored)
     * @return always {@code null}
     * @throws IOException never thrown by this stub
     */
    @Override
    public List<Map<String, Object>> getSearchEsById(String id) throws IOException {
        return null;
    }

    /**
     * Streams rows from MySQL into ES in bulk (mysql ===&gt; es), updating the
     * job state record on completion. Runs asynchronously on the taskExecutor pool.
     *
     * @param toEsData   connection settings, SQL to run, and target index name
     * @param tagEsState job state record updated with outcome and elapsed time
     * @return success message when the import completed, error message otherwise
     * @throws SQLException declared for interface compatibility; SQL failures are
     *                      now caught and reported via the return value
     */
    @Async("taskExecutor")
    @Override
    public ResponseMessage writeMysqlDataToEs(ToEsData toEsData, TagEsState tagEsState) throws SQLException {
        RestHighLevelClient client = restClient.getClient();
        long startTime = System.currentTimeMillis();
        BulkProcessor bulkProcessor = new BulkProcessorUtil().getBulkProcessor(client, tagEsState);

        Connection connection = DBHepler.getConn(toEsData);
        PreparedStatement preparedStatement = null;
        ResultSet resultSet = null;
        log.info("=====开始导数=====");
        String indexName = toEsData.getIndexName().toLowerCase();
        try {
            // FIX: the second constant must be a concurrency mode — the old code
            // passed TYPE_FORWARD_ONLY twice. Forward-only + read-only together with
            // fetchSize(Integer.MIN_VALUE) enables MySQL streaming result sets.
            preparedStatement = connection.prepareStatement(
                    toEsData.getSql(), ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
            preparedStatement.setFetchSize(Integer.MIN_VALUE);
            resultSet = preparedStatement.executeQuery();
            ResultSetMetaData colData = resultSet.getMetaData();
            int columnCount = colData.getColumnCount();

            int count = 0;
            while (resultSet.next()) {
                count++;
                // One fresh map per row. IndexRequest.source(Map) serializes the map
                // immediately, so handing it straight to the processor is safe; the
                // old 100k-row staging list was unnecessary — BulkProcessor already
                // batches and flushes according to its own thresholds.
                HashMap<String, String> row = new HashMap<>(EXPECTED_COLUMN_COUNT);
                for (int i = 1; i <= columnCount; i++) {
                    String column = colData.getColumnName(i);
                    row.put(column, resultSet.getString(column));
                }
                bulkProcessor.add(new IndexRequest(indexName).source(row));
                if (count % PROGRESS_LOG_INTERVAL == 0) {
                    log.info("Mysql handle data number : " + count);
                }
            }
            // push any remaining buffered requests to ES
            bulkProcessor.flush();

            String endTime = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date());
            // wait for in-flight bulk requests to finish (up to 250s)
            boolean terminatedFlag = bulkProcessor.awaitClose(250L, TimeUnit.SECONDS);
            // NOTE(review): this closes the shared client after every import —
            // confirm ESRestClient re-creates it on the next getClient() call.
            restClient.closeClient();
            String elapsed = (System.currentTimeMillis() - startTime) / 1000 + "s";
            log.info("====数据同步时间/s====：" + elapsed);
            log.info(String.valueOf(terminatedFlag));
            tagEsState.setExecutingState(EsState.SUCCEED);
            tagEsState.setExecutingTime(elapsed);
            tagEsState.setEndTime(endTime);
            tagEsStateService.updateState(tagEsState);
            return ResponseMessage.createBySuccess();
        } catch (Exception e) {
            // FIX: log with the logger (and keep the cause) instead of printStackTrace()
            log.error("mysql -> es import failed for index " + indexName, e);
            return ResponseMessage.createByError("系统出现异常");
        } finally {
            // FIX: close JDBC resources on EVERY path — the old code leaked the
            // connection and statement when the query itself threw.
            closeQuietly(resultSet);
            closeQuietly(preparedStatement);
            closeQuietly(connection);
        }
    }

    /** Closes a JDBC resource if non-null, logging (not propagating) close failures. */
    private static void closeQuietly(AutoCloseable resource) {
        if (resource != null) {
            try {
                resource.close();
            } catch (Exception e) {
                log.warn("failed to close JDBC resource", e);
            }
        }
    }

    /**
     * Asynchronously clears data from the given index (intended to run before a
     * re-import). "all" wipes the whole index; any other value is treated as a
     * month key matched against create_time_month_1.
     *
     * @param indexName     index to clear (e.g. hpdata)
     * @param business_type "all" or a month value
     * @param tagEsState    job state record (currently unused here)
     * @return success immediately — the delete itself runs asynchronously and only
     *         reports its outcome through the listener's log output
     */
    @Async("taskExecutor")
    @Override
    public ResponseMessage clearEsIndexData(String indexName, String business_type, TagEsState tagEsState) {
        log.info("====正在清空===" + indexName + "===索引===" + business_type + "的数据====");
        RestHighLevelClient client = restClient.getClient();
        DeleteByQueryRequest request = new DeleteByQueryRequest();
        request.indices(indexName);
        // FIX: equals-based dispatch also survives a null business_type
        // (switch on a null String throws NPE)
        if ("all".equals(business_type)) {
            // wipe everything in the index
            request.setQuery(QueryBuilders.matchAllQuery());
        } else {
            // clear a single month's data
            request.setQuery(QueryBuilders.termQuery("create_time_month_1", business_type));
        }
        // FIX: parameterize the listener — the old code used the raw ActionListener type
        ActionListener<BulkByScrollResponse> listener = new ActionListener<BulkByScrollResponse>() {
            @Override
            public void onResponse(BulkByScrollResponse response) {
                log.info("deleteByQueryBigData success!,result：" + response.toString());
                log.info("清空成功");
            }

            @Override
            public void onFailure(Exception e) {
                // FIX: log the exception with its stack trace instead of printStackTrace()
                log.error("deleteByQueryBigData failed,result：" + request.toString(), e);
            }
        };
        client.deleteByQueryAsync(request, RequestOptions.DEFAULT, listener);
        return ResponseMessage.createBySuccess();
    }

    /**
     * Computes hot-word frequencies for one month (es ===&gt; caller) by running
     * terms aggregations over three fields and removing stop words loaded from
     * the tag_stop_dic_config table.
     *
     * @param time_month month key matched against create_time_month_1
     * @return success message wrapping an insertion-ordered word -&gt; count map,
     *         or an error message on ES/JDBC failure
     */
    @Override
    public ResponseMessage getHotWordsByMonth(String time_month) {
        // connection settings for the hp database / index
        ToEsData toEsData = new ToEsData();
        toEsData.setUrl(HpEsConnect.URL);
        toEsData.setUserName(HpEsConnect.USER_NAME);
        toEsData.setPassword(HpEsConnect.PASSWORD);
        toEsData.setClassName(HpEsConnect.CLASS_NAME);
        toEsData.setIndexName(HpEsConnect.INDEX_NAME);

        Connection connection = DBHepler.getConn(toEsData);
        PreparedStatement preparedStatement = null;
        ResultSet resultSet = null;
        log.info("=====开始获取热词=====");
        // Stop words: every column value of every row of the config table.
        // NOTE(review): this mirrors the original behavior (all columns, not just a
        // word column) — confirm that is intentional. A HashSet also makes the
        // removal below O(1) per lookup instead of the old O(n) inner loop.
        Set<String> stopWords = new HashSet<>();
        try {
            // FIX: CONCUR_READ_ONLY instead of TYPE_FORWARD_ONLY passed twice
            preparedStatement = connection.prepareStatement(
                    "select * from tag_stop_dic_config",
                    ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
            preparedStatement.setFetchSize(Integer.MIN_VALUE);
            resultSet = preparedStatement.executeQuery();
            ResultSetMetaData colData = resultSet.getMetaData();
            while (resultSet.next()) {
                for (int i = 1; i <= colData.getColumnCount(); i++) {
                    stopWords.add(resultSet.getString(colData.getColumnName(i)));
                }
            }

            RestHighLevelClient client = restClient.getClient();
            SearchRequest request = new SearchRequest();
            SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
            // restrict the aggregation to the requested month
            sourceBuilder.query(QueryBuilders.termQuery("create_time_month_1", time_month));

            // include: only terms containing at least two consecutive CJK characters;
            // exclude: any term containing a digit
            IncludeExclude includeExclude =
                    new IncludeExclude(".*[\u4E00-\u9FA5][\u4E00-\u9FA5].*", ".*[(0-9)].*");
            sourceBuilder.aggregation(hotWordsAggregation("act_tag_item_values", includeExclude));
            sourceBuilder.aggregation(hotWordsAggregation("title", includeExclude));
            sourceBuilder.aggregation(hotWordsAggregation("thing_subject", includeExclude));
            request.source(sourceBuilder);

            SearchResponse response = client.search(request, RequestOptions.DEFAULT);
            Aggregations aggregations = response.getAggregations();
            Terms itemValues = aggregations.get("act_tag_item_values");
            Terms title = aggregations.get("title");
            Terms thingSubject = aggregations.get("thing_subject");

            // LinkedHashMap preserves insertion order. Insertion order is
            // thing_subject, title, act_tag_item_values (same as before): a later
            // put for a repeated word overwrites the count but keeps the word's
            // original position in the map.
            Map<String, Long> map = new LinkedHashMap<>();
            for (Terms.Bucket bucket : thingSubject.getBuckets()) {
                map.put(bucket.getKeyAsString(), bucket.getDocCount());
            }
            for (Terms.Bucket bucket : title.getBuckets()) {
                map.put(bucket.getKeyAsString(), bucket.getDocCount());
            }
            for (Terms.Bucket bucket : itemValues.getBuckets()) {
                map.put(bucket.getKeyAsString(), bucket.getDocCount());
            }

            // Drop every aggregated term that appears in the stop-word table.
            // FIX: replaces the hand-rolled iterator + inner loop, and no longer
            // NPEs when a stop-word column value is null.
            map.keySet().removeAll(stopWords);
            return ResponseMessage.createBySuccess(map);
        } catch (IOException | SQLException e) {
            // FIX: log with the logger instead of printStackTrace()
            log.error("获取热词失败 time_month=" + time_month, e);
            return ResponseMessage.createByError(e.getMessage());
        } finally {
            // FIX: close JDBC resources on every path — the old code only closed
            // them on the success path, leaking on any exception.
            closeQuietly(resultSet);
            closeQuietly(preparedStatement);
            closeQuietly(connection);
        }
    }

    /** Builds a top-N terms aggregation on {@code field} with the shared include/exclude filter. */
    private static TermsAggregationBuilder hotWordsAggregation(String field, IncludeExclude includeExclude) {
        return AggregationBuilders.terms(field)
                .field(field)
                .size(HOT_WORDS_AGG_SIZE)
                .includeExclude(includeExclude);
    }

    /**
     * Batch-inserts hot words for a month, replacing any existing rows for that
     * month (delete-by-month first, then insert).
     *
     * @param hotwordsMap word -&gt; frequency map to persist
     * @param time_month  month key (yyyy-MM)
     * @return number of rows inserted (&gt;= 0)
     */
    @Override
    public int insertBatch(Map<String, Long> hotwordsMap, String time_month) {
        // stamp every row with today's date
        String updateTime = new SimpleDateFormat("yyyyMMdd").format(new Date());
        List<TagHotwords> hotwordsList = new ArrayList<>(hotwordsMap.size());
        for (Map.Entry<String, Long> entry : hotwordsMap.entrySet()) {
            TagHotwords thw = new TagHotwords();
            thw.setHotWords(entry.getKey());
            thw.setHotNumber(String.valueOf(entry.getValue()));
            thw.setTimeMonth(time_month);
            thw.setUpdateTime(updateTime);
            hotwordsList.add(thw);
        }
        // replace-by-month: delete the month's existing rows, then insert the new batch
        int delCode = tagHotwordsDao.deleteByMonth(time_month);
        int addCode = tagHotwordsDao.insertBatch(hotwordsList);
        // FIX: use the logger instead of System.out
        log.info("批量删除===" + delCode + "条");
        log.info("批量插入===" + addCode + "条");
        return addCode;
    }

    /**
     * Loads the stored hot words for a month, in the DAO's descending order.
     *
     * @param time_month month key (yyyy-MM)
     * @return hot-word rows for that month
     */
    @Override
    public List<TagHotwords> getHotwords(String time_month) {
        return tagHotwordsDao.selectAllDesc(time_month);
    }
}
