package com.fly.elasticsearch.service.impl;

import com.alibaba.fastjson.JSONObject;
import com.fly.elasticsearch.entity.JobDetail;
import com.fly.elasticsearch.service.JobFullTextService;
import org.apache.http.HttpHost;
import org.apache.lucene.search.TotalHits;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.MultiMatchQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class JobFullTextServiceImpl implements JobFullTextService {
    /** Name of the Elasticsearch index that stores job documents. */
    private static final String JOB_IDX = "job_index";

    /** How long the server keeps a scroll context alive between fetches. */
    private static final TimeValue SCROLL_KEEP_ALIVE = TimeValue.timeValueMinutes(5);

    /** High-level REST client for all requests; released in {@link #close()}. */
    private final RestHighLevelClient restHighLevelClient;

    /**
     * Opens the connection to the Elasticsearch cluster.
     * Register additional nodes by adding more {@link HttpHost} entries.
     */
    public JobFullTextServiceImpl() {
        RestClientBuilder restClientBuilder = RestClient.builder(
                new HttpHost("192.168.199.144", 9200, "http")
//                ,new HttpHost("", 9200, "http")
//                ,new HttpHost("", 9200, "http")
        );
        restHighLevelClient = new RestHighLevelClient(restClientBuilder);
    }

    /**
     * Indexes one job document, using the entity id as the document id.
     *
     * @param jobDetail entity to store; serialized to JSON with FastJSON
     * @throws IOException if the request to Elasticsearch fails
     */
    @Override
    public void add(JobDetail jobDetail) throws IOException {
        IndexRequest indexRequest = new IndexRequest(JOB_IDX);
        indexRequest.id(jobDetail.getId() + "");
        String json = JSONObject.toJSONString(jobDetail);
        indexRequest.source(json, XContentType.JSON);
        restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
    }

    /**
     * Fetches a job document by id.
     *
     * <p>The ES {@code _id} is not part of {@code _source}, so it is set on the
     * entity explicitly after deserialization.
     *
     * @param id document id
     * @return the deserialized entity, or {@code null} when no document exists
     *         (previously a missing document caused a NullPointerException)
     * @throws IOException if the request to Elasticsearch fails
     */
    @Override
    public JobDetail findById(long id) throws IOException {
        GetRequest getRequest = new GetRequest(JOB_IDX, id + "");
        GetResponse getResponse = restHighLevelClient.get(getRequest, RequestOptions.DEFAULT);
        String json = getResponse.getSourceAsString();
        if (json == null) {
            // Document not found: return null instead of NPE-ing on setId below.
            return null;
        }
        JobDetail jobDetail = JSONObject.parseObject(json, JobDetail.class);
        jobDetail.setId(id);
        return jobDetail;
    }

    /**
     * Overwrites the stored document with the given entity, but only when a
     * document with that id already exists; otherwise the call is a no-op.
     *
     * @param jobDetail entity whose id selects the document to replace
     * @throws IOException if a request to Elasticsearch fails
     */
    @Override
    public void update(JobDetail jobDetail) throws IOException {
        GetRequest getRequest = new GetRequest(JOB_IDX, jobDetail.getId() + "");
        boolean exists = restHighLevelClient.exists(getRequest, RequestOptions.DEFAULT);
        if (exists) {
            UpdateRequest updateRequest = new UpdateRequest(JOB_IDX, jobDetail.getId() + "");
            updateRequest.doc(JSONObject.toJSONString(jobDetail), XContentType.JSON);
            restHighLevelClient.update(updateRequest, RequestOptions.DEFAULT);
        }
    }

    /**
     * Deletes the document with the given id.
     *
     * @param id document id to delete
     * @throws IOException if the request to Elasticsearch fails
     */
    @Override
    public void deleteById(long id) throws IOException {
        DeleteRequest deleteRequest = new DeleteRequest(JOB_IDX, id + "");
        restHighLevelClient.delete(deleteRequest, RequestOptions.DEFAULT);
    }

    /**
     * Full-text search of {@code keywords} over the "title" and "jd" fields.
     *
     * @param keywords query text matched against both fields
     * @return matching documents (server-side default result size applies)
     * @throws IOException if the request to Elasticsearch fails
     */
    @Override
    public List<JobDetail> searchByKeywords(String keywords) throws IOException {
        SearchRequest searchRequest = new SearchRequest(JOB_IDX);

        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.multiMatchQuery(keywords, "title", "jd"));
        searchRequest.source(searchSourceBuilder);

        SearchResponse searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
        return toJobDetails(searchResponse.getHits().getHits());
    }

    /**
     * Paged full-text search over the "title" and "jd" fields using from/size
     * pagination.
     *
     * @param keywords query text matched against both fields
     * @param pageNum  1-based page number
     * @param pageSize number of documents per page
     * @return map with "total" (overall hit count, Long) and "content"
     *         (the {@code List<JobDetail>} for this page)
     * @throws IOException if the request to Elasticsearch fails
     */
    @Override
    public Map<String, Object> searchByPage(String keywords, int pageNum, int pageSize) throws IOException {
        SearchRequest searchRequest = new SearchRequest(JOB_IDX);

        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.multiMatchQuery(keywords, "title", "jd"));

        // from/size pagination; pageNum is 1-based.
        searchSourceBuilder.size(pageSize);
        searchSourceBuilder.from((pageNum - 1) * pageSize);

        searchRequest.source(searchSourceBuilder);

        SearchResponse searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
        List<JobDetail> content = toJobDetails(searchResponse.getHits().getHits());

        TotalHits totalHits = searchResponse.getHits().getTotalHits();
        Map<String, Object> result = new HashMap<>();
        result.put("total", totalHits.value);
        result.put("content", content);
        return result;
    }

    /**
     * Scroll-based paged search with keyword highlighting on "title" and "jd".
     *
     * <p>When {@code scrollId} is null a new search (and scroll context) is
     * opened; otherwise the existing scroll is continued and its keep-alive
     * renewed. Highlighted fragments, when present, replace the plain field
     * values on the returned entities.
     *
     * @param keywords query text (used only on the first call, when scrollId is null)
     * @param scrollId scroll id from a previous call, or null to start a new scroll
     * @param pageSize number of documents per scroll batch (first call only)
     * @return map with "scroll_id" (pass to the next call), "total"
     *         (overall hit count, Long) and "content" (this batch's list)
     * @throws IOException if a request to Elasticsearch fails
     */
    @Override
    public Map<String, Object> searchByScrollPage(String keywords, String scrollId, int pageSize) throws IOException {
        SearchResponse searchResponse;
        if (scrollId == null) {
            // First page: run a normal search with highlighting and open a scroll context.
            SearchRequest searchRequest = new SearchRequest(JOB_IDX);
            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
            searchSourceBuilder.query(QueryBuilders.multiMatchQuery(keywords, "title", "jd"));

            HighlightBuilder highlightBuilder = new HighlightBuilder();
            highlightBuilder.field("title");
            highlightBuilder.field("jd");
            // BUGFIX: the opening tag was previously passed to field(), which
            // registered a bogus highlight field named "<font color='red'>"
            // and left the highlights without an opening tag.
            highlightBuilder.preTags("<font color='red'>");
            highlightBuilder.postTags("</font>");

            searchSourceBuilder.highlighter(highlightBuilder);

            searchSourceBuilder.size(pageSize);
            searchRequest.source(searchSourceBuilder);

            searchRequest.scroll(SCROLL_KEEP_ALIVE);

            searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
        } else {
            // Subsequent pages: continue the existing scroll, renewing its keep-alive.
            SearchScrollRequest searchScrollRequest = new SearchScrollRequest(scrollId);
            searchScrollRequest.scroll(SCROLL_KEEP_ALIVE);
            searchResponse = restHighLevelClient.scroll(searchScrollRequest, RequestOptions.DEFAULT);
        }

        List<JobDetail> content = new ArrayList<>();
        for (SearchHit hit : searchResponse.getHits().getHits()) {
            JobDetail jobDetail = toJobDetail(hit);

            // Replace plain field values with the concatenated highlight
            // fragments when ES returned highlights for them.
            Map<String, HighlightField> highlightFields = hit.getHighlightFields();
            String highlightedTitle = joinFragments(highlightFields.get("title"));
            if (highlightedTitle != null) {
                jobDetail.setTitle(highlightedTitle);
            }
            String highlightedJd = joinFragments(highlightFields.get("jd"));
            if (highlightedJd != null) {
                jobDetail.setJd(highlightedJd);
            }

            content.add(jobDetail);
        }

        Map<String, Object> result = new HashMap<>();
        // BUGFIX: the total hit count was computed but never returned; include
        // it so callers get the same pagination info as searchByPage.
        result.put("total", searchResponse.getHits().getTotalHits().value);
        result.put("scroll_id", searchResponse.getScrollId());
        result.put("content", content);
        return result;
    }

    /** Deserializes a hit's _source JSON into a JobDetail and sets its id. */
    private JobDetail toJobDetail(SearchHit hit) {
        JobDetail jobDetail = JSONObject.parseObject(hit.getSourceAsString(), JobDetail.class);
        jobDetail.setId(Long.parseLong(hit.getId()));
        return jobDetail;
    }

    /** Converts every hit in the array via {@link #toJobDetail(SearchHit)}. */
    private List<JobDetail> toJobDetails(SearchHit[] hits) {
        List<JobDetail> jobDetails = new ArrayList<>(hits.length);
        for (SearchHit hit : hits) {
            jobDetails.add(toJobDetail(hit));
        }
        return jobDetails;
    }

    /** Joins a highlight field's fragments into one string; null when absent. */
    private static String joinFragments(HighlightField field) {
        if (field == null) {
            return null;
        }
        StringBuilder builder = new StringBuilder();
        for (Text fragment : field.getFragments()) {
            builder.append(fragment);
        }
        return builder.toString();
    }

    /** Releases the underlying REST client and its connection pool. */
    @Override
    public void close() throws IOException {
        restHighLevelClient.close();
    }
}
