package org.fjsei.yewu.job.service;

import com.alibaba.fastjson2.JSON;
import jakarta.annotation.PostConstruct;
import jakarta.persistence.EntityManager;
import jakarta.persistence.PersistenceContext;
import lombok.extern.slf4j.Slf4j;
import md.cm.geography.AdminunitRepository;
import md.specialEqp.Eqp;
import org.fjsei.yewu.dto.SearchResponse;
import org.fjsei.yewu.index.CompanyEs;
import org.fjsei.yewu.job.controller.model.TestJobRequest;
import org.fjsei.yewu.jpa.NormalExecutor;
import org.fjsei.yewu.repository.NodeTPi;
import org.fjsei.yewu.repository.maint.SplJob;
import org.fjsei.yewu.repository.maint.SplJobRepository;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationContext;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Slice;
import org.springframework.data.domain.Sort;
import org.springframework.data.elasticsearch.core.ElasticsearchOperations;
import org.springframework.data.elasticsearch.core.SearchHits;
import org.springframework.hateoas.PagedModel.PageMetadata;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import java.time.Instant;
import java.util.*;

import static graphql.Assert.assertNotNull;

/**
 * NOTE(review): Hibernate Search requires creating the sequence
 * {@code hsearch_outbox_event_generator}; on CockroachDB that sequence is
 * prohibitively expensive — can it be replaced with UUIDs instead?
 */
@Slf4j
@Service
public class SplitTask {
    @Autowired
    private ElasticsearchOperations operations;
    @PersistenceContext
    private EntityManager entityManager;
    @PersistenceContext(unitName = "entityManagerFactorySei")
    private EntityManager emSei;                //EntityManager相当于hibernate.Session：

    private final AdminunitRepository adminunitRepository;
//    @Autowired
//    protected StoreSyncRepository storeSyncRepository;
    @Autowired protected SplJobRepository splJobRepository;
    static Instant  beginTime;      //任务启动时；
    @Autowired
    private ApplicationContext applicationContext;

    public SplitTask(AdminunitRepository adminunitRepository) {
        this.adminunitRepository = adminunitRepository;
    }

    @PostConstruct
    private void init() {
    }

    @Transactional
    public List<SplJob> commitSplit(List<SplJob> batch) {
       return splJobRepository.saveAll(batch);
    }
    @Transactional
    public SplJob commitSplit(SplJob splJob) {
        return splJobRepository.save(splJob);
    }
        //5个参数的
        public List<SplJob> repoSplit(TestJobRequest req) {
            NormalExecutor applyRepository = null;
            try {
                Class<?> clazz = Class.forName("org.fjsei.yewu.repository.StoreSyncRepository");
                applyRepository= (NormalExecutor) applicationContext.getBean(clazz);
            } catch (ClassNotFoundException e) {
                e.printStackTrace();
            }
            assertNotNull(applyRepository, () -> "applyRepository空");
            int  MAX_SPLTNUM=req.spltnum; // 将数据表拆分为100份
            long totalCount = applyRepository.count();
            long chunkSize = (totalCount + MAX_SPLTNUM - 1) / MAX_SPLTNUM; // 向上取整计算每份的行数（使用长整型以防溢出）
            List<Object> firstIdsOfChunks = new ArrayList<>(MAX_SPLTNUM);
            for (int chunkNumber = 1; chunkNumber < MAX_SPLTNUM; chunkNumber++) {
                long startRow = chunkNumber * chunkSize -1; // 计算当前份的第一条记录的索引
                Pageable pageable = PageRequest.of((int) startRow, 1, Sort.by(Sort.Direction.ASC, "id"));  // 注意：这里startRow需要转换为int，但要确保它不会溢出
                // 检查startRow是否超出Integer.MAX_VALUE，如果超出，则需要特殊处理（例如，分批处理或改用其他方法）
                if(startRow > Integer.MAX_VALUE)    throw new IllegalArgumentException("表行数太多");
                //输出日志 Hibernate:  <criteria>   select ss1_0.id from StoreSync ss1_0 order by 1 limit ? offset ?
                Slice<NodeTPi> slice=applyRepository.readAllBy( pageable, NodeTPi.class);
                Object firstId=slice.hasContent()? slice.getContent().get(0).getId() :null;
                if(null!=firstId)  firstIdsOfChunks.add(firstId);
            }
            List<SplJob> batch=new ArrayList<>();
            for(int i=0; i<firstIdsOfChunks.size()+1; i++)
            {
                SplJob ado=SplJob.builder().start(0==i?null:JSON.toJSONString(firstIdsOfChunks.get(i-1)))
                        .endr(i<firstIdsOfChunks.size()? JSON.toJSONString(firstIdsOfChunks.get(i)) :null)
                        .groupName("device").jobName("infsyn-"+(i+1)).limt(30).offs(0L)
                        .build();
                batch.add(ado);
            }
            return batch;
        }
        //接口页翻页情形
    public List<SplJob> pageSplit(TestJobRequest req) {
        double pagesf = (double) req.allrows/req.pagesize;
        long pagesum = (long) Math.ceil(pagesf);
        int  MAX_SPLTNUM=req.spltnum;
        long totalCount =pagesum;
        assertNotNull(totalCount>MAX_SPLTNUM, () -> "太少");
        long chunkSize = (totalCount + MAX_SPLTNUM - 1) / MAX_SPLTNUM;
        List<Long> firstIdsOfChunks = new ArrayList<>(MAX_SPLTNUM);
        for (int chunkNumber=1; chunkNumber <= MAX_SPLTNUM; chunkNumber++) {
            long pend = chunkNumber * chunkSize -1;
            if(pend > Integer.MAX_VALUE)    throw new IllegalArgumentException("太多");
            if(pend>totalCount)  pend=totalCount;
            firstIdsOfChunks.add(pend);
        }
        List<SplJob> batch=new ArrayList<>();
        for(int i=0; i<firstIdsOfChunks.size(); i++)
        {
            long start= 0==i? 1 : (firstIdsOfChunks.get(i-1)+1);
            long endr= firstIdsOfChunks.get(i);
            SplJob ado=SplJob.builder().start(JSON.toJSONString(start))
                    .endr(JSON.toJSONString(endr))
                    .groupName("DeviceKey").jobName(""+(i+1)).limt(req.pagesize).offs(0L)
                    .build();
            batch.add(ado);
        }
        return batch;
    }

    public SearchResponse<? extends Eqp> trySplit(TestJobRequest req) {
        List<SplJob> batch;
        if(2==req.type) batch=pageSplit(req);
        else batch=repoSplit(req);
        List<SplJob> list=commitSplit(batch);     //不管读取ID的过程，单独用小的事务提交
        log.info("trySplit: countall={}",list.size());
//        SplJob splJob= splJobRepository.findByGroupNameAndJobName("device","infsyn-22");
        SearchResponse<? extends Eqp> response = new SearchResponse<>(
                null,
                new PageMetadata(1,0, list.size()),
                null);
        return response;
    }
    //
    public List<CompanyEs> searchResults(String searchterms) {
        SearchHits<CompanyEs> results = null;
        return results.getSearchHits().stream().map(
                hit -> hit.getContent()
        ).toList();
    }

}
