package com.es.search.test;

import java.net.URL;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
import org.elasticsearch.xcontent.XContentType;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;

import com.alibaba.fastjson.JSON;

@SuppressWarnings("deprecation")
@Component
public class HtmlParseUtil {
	@Autowired
	private RestHighLevelClient restHighLevelClient;
	@Bean
	public void test() {
		try {
			//DeleteIndexRequest request = new DeleteIndexRequest("jd_goods");
	        //restHighLevelClient.indices().delete(request, RequestOptions.DEFAULT);
			//根据关键词查询，把读取的数据放入es中
			//parseContent("手机");
			searchContentHighlighter("双卡双待",1,20);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/**
	 * 关键字获取商品信息
	 * @param keyword
	 * @return
	 * @throws Exception
	 */
	public static List<Content> parseJD(String keyword) throws Exception {
		String url = "https://search.jd.com/Search?keyword=" + keyword;
		// 1.使用Jsoup解析网页，获得网页的文档，这个document就相当于网页的html代码了
		Document document = Jsoup.parse(new URL(url), 30000);
		// 2.根据id获取元素，和原生js方法一样
		Element element = document.getElementById("J_goodsList");
		// 3.在当前id元素下获取所有的li标签
		Elements elements = element.getElementsByTag("li");

		List<Content> contentList = new ArrayList<Content>();

		// 4.遍历所有的li标签，获取我们需要的数据
		for (Element el : elements) {
			// 因为京东对商品图片采用了懒加载，所以直接读src属性是读不到路径的，要读data-lazy-img这个属性
			String imgUrl = el.getElementsByTag("img").eq(0).attr("data-lazy-img");
			String price = el.getElementsByClass("p-price").eq(0).text();
			String title = el.getElementsByClass("p-name").eq(0).text();
			// 封装数据
			Content content = new Content();
			content.setImg(imgUrl);
			content.setPrice(price);
			content.setTitle(title);
			contentList.add(content);
		}
		System.out.println("Jsoup解析网页-end!");
		return contentList;
	}

	//根据关键词查询，把读取的数据放入es中
	public Boolean parseContent(String keywords) throws Exception {
		
		// 通过工具类获取到数据
		List<Content> contentList = HtmlParseUtil.parseJD(keywords);

		BulkRequest bulkRequest = new BulkRequest();
		bulkRequest.timeout(TimeValue.timeValueMinutes(2));
		bulkRequest.timeout("2m");

		for (int i = 0; i < contentList.size(); i++) {
			Content content=contentList.get(i);
			if (StringUtils.hasLength(content.getImg()) && StringUtils.hasLength(content.getTitle())
					&& StringUtils.hasLength(content.getPrice())) {
				bulkRequest.add(new IndexRequest("jd_goods").source(JSON.toJSONString(content), XContentType.JSON));
			}else {
				System.out.println(content.getImg()+"|"+content.getTitle()+"|"+content.getPrice());
			}
		}
		// 将数据放入ES中
		BulkResponse bulkResponse = restHighLevelClient.bulk(bulkRequest, RequestOptions.DEFAULT);

		return !bulkResponse.hasFailures();
	}
	
	//数据存进去了，下面就是查询数据了
	public List<Map<String, Object>> searchContentPage(String keyword, int pageNo, int pageSize) throws Exception {
		// 1.创建查询请求
		SearchRequest searchRequest = new SearchRequest("jd_goods");

		// 2.创建searchSourceBuilder
		SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
		// 分页设置
		searchSourceBuilder.from(pageNo);
		searchSourceBuilder.size(pageSize);

		// 3.创建查询条件
		TermQueryBuilder termQuery = QueryBuilders.termQuery("title", keyword);
		searchSourceBuilder.query(termQuery);

		// 4.将searchSourceBuilder放入查询请求中
		searchRequest.source(searchSourceBuilder);
		// 5.发送查询请求，返回响应结果
		SearchResponse searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);

		// 6.解析结果
		List<Map<String, Object>> list = new ArrayList<>();
		for (SearchHit documentFields : searchResponse.getHits().getHits()) {
			list.add(documentFields.getSourceAsMap());
		}
		return list;
	}
	
	//那为了使我们所查询的关键性高亮，所以要在上面的普通查询的代码中增加高亮的逻辑
	public List<Map<String, Object>> searchContentHighlighter(String keyword, int pageNo, int pageSize) throws Exception{
        // 1.创建查询请求
        SearchRequest searchRequest = new SearchRequest("jd_goods");

        // 2.创建searchSourceBuilder
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS));

        // 分页设置
        searchSourceBuilder.from(pageNo);
        searchSourceBuilder.size(pageSize);

        // 3. 查询条件构建
        TermQueryBuilder termQuery = QueryBuilders.termQuery("title", keyword);
        searchSourceBuilder.query(termQuery);// 放入查询条件

        // 高亮构建
        HighlightBuilder highlightBuilder = new HighlightBuilder();
        highlightBuilder.field("title");
        highlightBuilder.requireFieldMatch(false);
        highlightBuilder.preTags("<span style='color:red'>");
        highlightBuilder.postTags("</span>");
        searchSourceBuilder.highlighter(highlightBuilder); // 放入高亮条件

        //4.将searchSourceBuilder放入查询请求中
        searchRequest.source(searchSourceBuilder);
        //5.发送查询请求，返回响应结果
        SearchResponse searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);

        // 6.解析结果
        List<Map<String, Object>> list = new ArrayList<>();
        for (SearchHit hit : searchResponse.getHits().getHits()) {

            // *****************高亮字段替换  start *******************

            // 从kibana的控制台高亮查询可以发现，高亮字段是放到highlight这个标签里面的，而source字段里面放的就是纯数据
            // 所以我们为了使输出的source带上高亮的效果，就需要把highlight标签里的高亮字段内容，替换掉source里面的字段内容

            Map<String, Object> source = hit.getSourceAsMap();// 首先先获取source的数据内容，因为下面要对它进行替换

            Map<String, HighlightField> highlightFields = hit.getHighlightFields();// 获取高亮highlight标签下的所有内容
            HighlightField titleField = highlightFields.get("title");// 获取highlight标签中高亮的title字段

            if(titleField!=null){
                String title="";
                Text[] fragments = titleField.fragments();// 解析字段title并循环获取到它的值
                for (Text fragment : fragments) {
                    title+=fragment;
                }

                // 这里就是用高亮字段，去替换掉source中本来的title字段
                source.put("title",title);
            }
            // *****************高亮字段替换  end *******************


            // 替换完成，放入list中
            list.add(source);

        }

        return list;
    }
}
