package org.fjsei.yewu.service;

import jakarta.annotation.PostConstruct;
import jakarta.persistence.EntityManager;
import jakarta.persistence.PersistenceContext;
import jakarta.transaction.Transactional;
import md.cm.base.Company;
import md.cm.base.Person;
import md.cm.geography.Adminunit;
import md.cm.geography.AdminunitRepository;
import md.specialEqp.Eqp;
import md.specialEqp.inspect.Isp;
import md.specialEqp.type.*;
import org.fjsei.yewu.aop.hibernate.MyMassIndexingLoggingMonitor;
import org.fjsei.yewu.dto.SearchAggregation;
import org.fjsei.yewu.dto.SearchRequest;
import org.fjsei.yewu.dto.SearchResponse;
import org.fjsei.yewu.index.CompanyEs;
import org.hibernate.search.engine.search.aggregation.AggregationKey;
import org.hibernate.search.engine.search.query.SearchResult;
import org.hibernate.search.engine.search.sort.dsl.SortOrder;
import org.hibernate.search.mapper.orm.Search;
import org.hibernate.search.mapper.orm.massindexing.MassIndexer;
import org.hibernate.search.mapper.orm.session.SearchSession;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.elasticsearch.client.elc.NativeQuery;
import org.springframework.data.elasticsearch.core.ElasticsearchOperations;
import org.springframework.data.elasticsearch.core.SearchHits;
import org.springframework.hateoas.PagedModel.PageMetadata;
import org.springframework.stereotype.Service;
import org.springframework.util.Assert;

import java.time.Instant;
import java.util.*;
import java.util.concurrent.Future;
import java.util.stream.Collectors;

/**
 * OPEN QUESTION: Hibernate Search creates the sequence hsearch_outbox_event_generator for its
 * outbox events; on CockroachDB sequences are too expensive — can this be replaced with UUIDs?
 * */

@Service
public class SearchService {
    @Autowired
    private ElasticsearchOperations operations;      // Spring Data Elasticsearch client (used by the ES8 test queries below).
    // Defaults to the primary database's persistence unit.
    @PersistenceContext
    private EntityManager entityManager;

    // A startup @PostConstruct mass-reindex was deliberately removed: rebuilding every index at
    // boot is too slow. Reindexing is triggered on demand via doMassIndex()/startMassIndex().

    /** Index names accepted by the reindex API, mapped to their indexed entity classes. */
    final Map<String, Class<?>>  mapIndexClass= new HashMap<>();
    private final AdminunitRepository adminunitRepository;

    // NOTE(review): the reindex bookkeeping below is static mutable shared state. Only one
    // reindex task may run at a time; the sole guard is futureMassindex.isDone(), so
    // startMassIndex() is synchronized to close the check-then-act race window.
    /** The currently running reindex task; a new task may start only after this one is done. */
    static Future<?>  futureMassindex;
    /** Comma-separated entity names targeted by the current task. */
    static String   massindexEntities;
    /** Instant at which the current task was started. */
    static Instant  beginTime;
    /** Progress monitor of the current task; must not be final — a fresh instance is created per task. */
    static  MyMassIndexingLoggingMonitor  massIndexingMonitor;

    public SearchService(AdminunitRepository adminunitRepository) {
        this.adminunitRepository = adminunitRepository;
    }

    /**
     * Registers every ES index this backend manages: 5 top-level indexes, plus the Eqp
     * subclasses, each of which gets its own physical index (Ropeway is deliberately omitted,
     * so Eqp really spans 9 physical indexes). Querying through a subclass such as Elevator
     * is actually slower than querying Eqp itself.
     * The search model should be driven backwards from front-end requirements and kept
     * minimal: Company and Person are needed for display; Adminunit currently has no pressing
     * display requirement and its index model could be dropped.
     */
    @PostConstruct
    private void init() {
        mapIndexClass.put("Company", Company.class);
        mapIndexClass.put("Person", Person.class);
        mapIndexClass.put("PipingUnit", PipingUnit.class);
        mapIndexClass.put("Isp", Isp.class);
        mapIndexClass.put("Eqp", Eqp.class);
        // Subclass entries: allow rebuilding the index of a single equipment type.
        mapIndexClass.put("Elevator", Elevator.class);
        mapIndexClass.put("Crane", Crane.class);
        mapIndexClass.put("Vessel", Vessel.class);
        mapIndexClass.put("Boiler", Boiler.class);
        mapIndexClass.put("Pipeline", Pipeline.class);
        mapIndexClass.put("FactoryVehicle", FactoryVehicle.class);
        mapIndexClass.put("Amusement", Amusement.class);
    }

    /**
     * Builds a MassIndexer for the given entity names. The single name "ALL.INDEXS" selects
     * every indexed entity; otherwise each name is resolved through {@code mapIndexClass}
     * (unknown names fail fast instead of producing an obscure NPE inside Hibernate Search).
     * <p>purgeAllOnStart(false): no duplicate documents were observed afterwards, and it
     * avoids the unavailability window that a full purge would cause.
     *
     * @param indexEntityNames entity names registered in init(), or the single value "ALL.INDEXS"
     * @return a configured, not-yet-started MassIndexer
     */
    private MassIndexer buildMassIndexer(String[] indexEntityNames) {
        // NOTE(review): this extra EntityManager backs the asynchronous indexing task and is
        // never closed explicitly — presumably it must outlive this method; confirm before changing.
        SearchSession searchSession = Search.session(entityManager.getEntityManagerFactory().createEntityManager());
        MassIndexer indexer;
        if (indexEntityNames.length == 1 && "ALL.INDEXS".equals(indexEntityNames[0])) {
            indexer = searchSession.massIndexer();
        } else {
            List<Class<?>> clazzTodo = Arrays.stream(indexEntityNames)
                    .map(name -> {
                        Class<?> clazz = mapIndexClass.get(name);
                        Assert.notNull(clazz, "Unknown index entity name: " + name);
                        return clazz;
                    })
                    .collect(Collectors.toList());
            indexer = searchSession.massIndexer(clazzTodo);
            // To reindex only a subset (JPQL condition), reducing the impact window, e.g.:
            // indexer.type(Isp.class).reindexOnly("e.id <= :id").param("id", uuid);
        }
        return indexer.purgeAllOnStart(false).threadsToLoadObjects(1);
    }

    /**
     * Manually rebuilds the index of the given entities; the affected indexes are mostly
     * unusable while rebuilding (purgeAllOnStart(false) improves this). See the Hibernate
     * Search docs, "Conditional reindexing" (https://docs.jboss.org/hibernate/search/).
     * Entities without an index annotation fail with
     * "... types are not indexed, nor is any of their subtypes" (e.g. Adminunit).
     * ES stores _id values as text; an OpenSearch server behaves the same as ES here.
     * REST variant, independently testable without the front end.
     *
     * @param indexEntityNames entity names registered in init(), or the single value "ALL.INDEXS"
     * @return always true; the task runs asynchronously — poll isCompleteMassIndex()
     */
    public Boolean doMassIndex(String[] indexEntityNames) {
        MassIndexer indexer = buildMassIndexer(indexEntityNames);
        beginTime = Instant.now();
        massIndexingMonitor = new MyMassIndexingLoggingMonitor(20000);
        // Asynchronous start; the synchronous startAndWait() variant suffered from heavy
        // CockroachDB retry storms right after startup.
        futureMassindex = indexer.monitor(massIndexingMonitor).start().toCompletableFuture();
        return true;
    }

    /**
     * Asynchronously starts an index-rebuild task (GraphQL variant, used by the maintenance
     * page). Supports "ALL.INDEXS" to rebuild every Hibernate-Search-managed index at once.
     * Synchronized so that two concurrent calls cannot both pass the isDone() guard and
     * clobber each other's bookkeeping.
     *
     * @param indexs comma-separated entity names, or "ALL.INDEXS"
     * @return true when the task was started
     * @throws IllegalArgumentException when the previous task has not finished yet
     */
    public synchronized Boolean startMassIndex(String indexs) {
        if (null != futureMassindex) {
            Assert.isTrue(futureMassindex.isDone(), "上次任务未结束" + massindexEntities);
        }
        massindexEntities = indexs;
        MassIndexer indexer = buildMassIndexer(indexs.split(","));
        beginTime = Instant.now();
        massIndexingMonitor = new MyMassIndexingLoggingMonitor(20000);
        futureMassindex = indexer.monitor(massIndexingMonitor).start().toCompletableFuture();
        return null != futureMassindex;
    }

    /** Asynchronous API: has the current index-rebuild task finished? */
    public Boolean isCompleteMassIndex(String[] indexEntityNames) {
        Assert.notNull(futureMassindex, "索引重建任务没开启");
        return futureMassindex.isDone();
    }

    /** Asynchronous API: attempts to cancel the current index-rebuild task. */
    public Boolean cancelMassIndex(String[] indexEntityNames) {
        Assert.notNull(futureMassindex, "任务没开启");
        return futureMassindex.cancel(true);
    }

    /** @return start time of the current/last reindex task (shared static state). */
    public Instant getBeginTime() {
        return beginTime;
    }

    /** @return progress monitor of the current/last reindex task (shared static state). */
    public MyMassIndexingLoggingMonitor getMassIndexingMonitor() {
        return massIndexingMonitor;
    }

    /** @return comma-separated entity names of the current/last reindex task (shared static state). */
    public String getMassindexEntities() {
        return massindexEntities;
    }

    /**
     * Full-text Person search with a terms aggregation on the same field.
     * The backing ES cluster has the ik analyzer installed, so "analyzer": "ik_smart" can be
     * used directly in the mapping.
     *
     * @param request keyword plus paging/sort information; request.page is required
     * @return hits, paging metadata and the per-term facet counts
     * @throws NullPointerException when request.page is null (fail-fast; it was previously
     *         dereferenced unconditionally in fetch() anyway)
     */
    @Transactional
    public SearchResponse<Person> searchQuery1(SearchRequest request) {
        Objects.requireNonNull(request.page, "request.page is required");
        SearchSession searchSession = Search.session(entityManager);
        AggregationKey<Map<String, Long>> countByAbbreviation = AggregationKey.of("countByAbbreviation");
        // NOTE(review): fields() cannot be changed dynamically per request here; "no_sort"
        // stands in for e.g. "document.title"/"document.abbreviation".
        SearchResult<Person> result = searchSession.search(Person.class)
                .where(f -> f.match().fields("no_sort").matching(request.keyword))
                .sort(f -> f.composite(b -> request.page.getSort().get().forEach(s ->
                        b.add(f.field(s.getProperty()).order(s.isAscending() ? SortOrder.ASC : SortOrder.DESC)))))
                .aggregation(countByAbbreviation, f -> f.terms().field("no_sort", String.class))
                .fetch(
                        request.page.getPageNumber() * request.page.getPageSize(),
                        request.page.getPageSize()
                );

        List<SearchAggregation> facetAbbreviation = result.aggregation(countByAbbreviation)
                .entrySet()
                .stream()
                .map(e -> new SearchAggregation(e.getKey(), e.getValue()))
                .collect(Collectors.toList());
        return new SearchResponse<>(
                result.hits(),
                new PageMetadata(request.page.getPageSize(), request.page.getPageNumber(), result.total().hitCount()),
                facetAbbreviation);
    }

    /** ES 8.2 reference: https://www.elastic.co/guide/en/elasticsearch/reference/8.2/search-your-data.html
     * simple_query_string is the simplified form of query_string; match/phrase are full-text
     * queries. Text fields should be queried with match, not term. matchPhraseQuery supports a
     * slop parameter, e.g. matchPhraseQuery("name", ...).slop(2): up to 2 words may differ
     * (swapped order or extra words), but no required word may be missing. filter/bool act as
     * filters. Apart from term/terms, wildcard/regexp, prefix/range, ids and exists (exact
     * queries against Keyword fields), everything goes through the analyzed inverted index.
     * A match on a KeywordField returns the same as a term query.
     * Nested-field query variants that were tried (kept for reference):
     *   .where(f -> f.bool().should(f.exists().field("ad"))) — 396 hits
     *   .where(f -> f.nested().objectField("ad").nest(s -> s.id().matching(id))) — wrong, id() unusable in nested
     *   .where(f -> f.nested().objectField("ad").nest(s -> s.match().field("id").matching(id))) — error: unknown field
     *   .where(f -> f.nested().objectField("ad").nest(f.withRoot("ad").match().field("id").matching(id))) — works
     *   .where(f -> f.nested().objectField("ad").nest(f.match().field("ad.id").matching(id))) — simpler
     *   .where(f -> f.match().field("ad.id").matching(id)) — simplest!   (or .matchAll())
     *
     * @param request keyword matched against the "oid" field; request.page is required
     * @throws NullPointerException when request.page is null
     */
    @Transactional
    public SearchResponse<? extends Eqp> searchQuery5(SearchRequest request) {
        Objects.requireNonNull(request.page, "request.page is required");
        SearchSession searchSession = Search.session(entityManager);
        SearchResult<? extends Eqp> result = searchSession.search(Eqp.class)
                .where(f -> f.match().field("oid").matching(request.keyword))
                .sort(f -> f.composite(b -> request.page.getSort().get().forEach(s ->
                        b.add(f.field(s.getProperty()).order(s.isAscending() ? SortOrder.ASC : SortOrder.DESC)))))
                .fetch(
                        request.page.getPageNumber() * request.page.getPageSize(),
                        request.page.getPageSize()
                );

        return new SearchResponse<>(
                result.hits(),
                new PageMetadata(request.page.getPageSize(), request.page.getPageNumber(), result.total().hitCount()),
                null);
    }

    /**
     * Looks up PipingUnit documents whose nested "pipe.id" equals the UUID passed in
     * request.keyword.
     *
     * @param request keyword must be a UUID string; request.page is required
     * @throws IllegalArgumentException when request.keyword is not a valid UUID
     * @throws NullPointerException when request.page is null
     */
    @Transactional
    public SearchResponse<PipingUnit> searchQuery(SearchRequest request) {
        Objects.requireNonNull(request.page, "request.page is required");
        SearchSession searchSession = Search.session(entityManager);
        UUID uuid = UUID.fromString(request.keyword);
        SearchResult<PipingUnit> result = searchSession.search(PipingUnit.class)
                .where(f -> f.match().field("pipe.id").matching(uuid))
                .sort(f -> f.composite(b -> request.page.getSort().get().forEach(s ->
                        b.add(f.field(s.getProperty()).order(s.isAscending() ? SortOrder.ASC : SortOrder.DESC)))))
                .fetch(
                        request.page.getPageNumber() * request.page.getPageSize(),
                        request.page.getPageSize()
                );

        return new SearchResponse<>(
                result.hits(),
                new PageMetadata(request.page.getPageSize(), request.page.getPageNumber(), result.total().hitCount()),
                null);
    }

    /**
     * Test of the new ES8 native client: match query on the CompanyEs "name" field
     * (mapped as a Text field).
     */
    public SearchHits<CompanyEs> search(String searchterms) {
        return operations.search(
                NativeQuery.builder().withQuery(
                        q -> q.match(
                                m -> m.field("name").query(searchterms)
                        )
                ).build(),
                CompanyEs.class
        );
    }

    /** Test of the new ES8 native client: returns just the document contents of {@link #search}. */
    public List<CompanyEs> searchResults(String searchterms) {
        return search(searchterms).getSearchHits().stream()
                .map(hit -> hit.getContent())
                .toList();
    }
}

