package iwb.utils.kafkapatition;

import araf.utils.ArafStringUtils;
import com.alibaba.fastjson.JSON;
import iwb.utils.RedisUtils;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.redisson.api.RMap;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.core.env.Environment;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.util.CollectionUtils;

import javax.annotation.PostConstruct;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;


@Slf4j
@Data
public abstract class PartitionExecAbstract {

    private ThreadPoolTaskExecutor threadPool;

    @Autowired
    private Environment env;

    @Autowired
    private RedisUtils redisUtils;

    /**
     * Sends data in segments; subclasses add the producer annotation on their override.
     *
     * @param data batch of records to send (raw {@code List} kept intentionally —
     *             changing it to a parameterized type would break existing subclass overrides)
     */
    public abstract void sender(List data);

    @Autowired
    public void setThreadPool(ThreadPoolTaskExecutor getExecutor) {
        this.threadPool = getExecutor;
    }

    /**
     * Resolves the worker pool size. Resolution order (later wins):
     * built-in default 8, then the global property
     * {@code iwb.processor.core.pool.size}, then a topic-specific property
     * keyed by {@link #getTopic()}.
     *
     * @return configured pool size
     * @throws NumberFormatException if a configured property is not a valid integer
     */
    private int getThreadPoolSize() {
        int size = 8;

        String globalSize = env.getProperty("iwb.processor.core.pool.size");
        log.debug("iwb.processor.core.pool.size:{}.", globalSize);
        if (globalSize != null) {
            size = Integer.parseInt(globalSize);
        }

        String topicCustomSize = env.getProperty(getTopic());
        log.debug(getTopic() + ":{}.", topicCustomSize);
        if (topicCustomSize != null) {
            size = Integer.parseInt(topicCustomSize);
        }

        return size;
    }

    /**
     * Listens on the Kafka topic resolved from {@code #{__listener.topic}}.
     * Each record is JSON-deserialized into a {@link Messager} (optionally
     * uncompressed first, per {@link #isCompress()}), handed to
     * {@link #consumer(Messager)}, and its Redis progress key is removed.
     * The offset is acknowledged only after the whole batch succeeds; on any
     * exception the batch is NOT acknowledged and will be redelivered.
     *
     * @param records batch of raw Kafka records
     * @param ack     manual offset acknowledgment handle
     */
    @KafkaListener(groupId = "#{__listener.groupId}", topics = "#{__listener.topic}", containerFactory = "partitionContainerFactory")
    public void listen(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        try {
            for (final ConsumerRecord<String, String> r : records) {
                Messager messager;
                if (isCompress()) {
                    messager = JSON.parseObject(ArafStringUtils.uncompress(r.value()), Messager.class);
                } else {
                    messager = JSON.parseObject(r.value(), Messager.class);
                }

                this.consumer(messager);
                redisUtils.removeRedisKeyValue(this.getDataTypeId(), messager.getIndexScope());
            }

            ack.acknowledge(); // manual offset commit — only after the whole batch processed
        } catch (Exception e) {
            // No acknowledge on failure: Kafka will redeliver the batch.
            log.error("IwbSubscribe listen error!", e);
        }
    }

    /**
     * @return whether record payloads are compressed and must be uncompressed before parsing
     */
    public abstract boolean isCompress();

    /**
     * Processes a single deserialized message.
     *
     * @param messager the message to process
     * @throws InterruptedException if processing is interrupted
     */
    public abstract void consumer(Messager messager) throws InterruptedException;

    /**
     * @return consumer group id: {@code spring.application.name} suffixed with "-pe"
     */
    public String getGroupId() {
        return env.getProperty("spring.application.name").concat("-pe");
    }

    /**
     * @return topic name: {@code OPRA-PARALLEL-<dataTypeId>-ps}
     */
    public String getTopic() {
        return
                String.format("OPRA-PARALLEL-%s", getDataTypeId()).concat("-ps");
    }

    /**
     * @return the data-type id used in the topic name and Redis keys
     */
    public abstract String getDataTypeId();

    protected ThreadPoolTaskExecutor getExecutorService() {
        return this.threadPool;
    }

    /**
     * After calling {@link #sender(List)}, polls Redis once per second until the
     * progress map for {@link #getDataTypeId()} is empty (all segments processed)
     * or the timeout elapses.
     *
     * @param ms timeout in milliseconds; {@code 0} means wait indefinitely
     */
    public void waitProcess(int ms) {
        try {
            int waitedMs = 0;
            while (ms == 0 || waitedMs < ms) {
                Thread.sleep(1000);
                // BUG FIX: was "i = +1000", which reset the counter to 1000 every
                // iteration so a finite timeout was never honored.
                waitedMs += 1000;

                RMap<String, Object> rMap = this.getRedisUtils().getFileLoadContext(getDataTypeId());
                if (CollectionUtils.isEmpty(rMap) || CollectionUtils.isEmpty(rMap.values())) {
                    // All segments done — clean up the progress map and stop waiting.
                    rMap.delete();
                    break;
                }
                log.debug(getDataTypeId() + " :" + rMap.values());
                // Still in progress: keep the map alive a little longer.
                rMap.expire(2, TimeUnit.SECONDS);
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
            log.error(e.toString());
        }
    }

}
