package com.sailors.common.task.sdk.core.handle.factory;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.sailors.common.task.sdk.core.constants.ApiConstants;
import com.sailors.common.task.sdk.core.constants.Topics;
import com.sailors.common.task.sdk.core.handle.CommonTaskHandler;
import com.sailors.common.task.sdk.core.handle.KafkaProducer;
import com.sailors.common.task.sdk.core.model.FileTaskCreateDto;
import com.sailors.common.task.sdk.core.model.FileTaskFailedDto;
import com.sailors.common.task.sdk.core.model.FileTaskLogDetailVo;
import com.sailors.common.task.sdk.core.model.RefreshProcessDto;
import lombok.AccessLevel;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.ResponseEntity;

import java.util.concurrent.ExecutionException;

/**
 * File task handler.
 *
 * <p>Singleton that reports file-task lifecycle events to the task service:
 * creation, progress refresh and failure — each available synchronously over
 * HTTP, and (for progress/failure) asynchronously via Kafka. When a Kafka
 * payload exceeds the broker's record-size limit, the async methods fall back
 * to the corresponding synchronous HTTP endpoint.
 *
 * @author cyj
 * @date 2022-11-25 14:51:19
 */
@Slf4j
@NoArgsConstructor(access = AccessLevel.PRIVATE)
public class FileTaskHandler extends KafkaProducer<FileTaskLogDetailVo> implements CommonTaskHandler {

    /** Partition count of the task topics (plan: 2 brokers, 3 partitions, 2 replicas). */
    private static final int PARTITION_COUNT = 3;

    private static volatile FileTaskHandler fileTaskHandler;

    /**
     * Returns the lazily initialized singleton instance.
     * Double-checked locking; the field is {@code volatile} so the constructed
     * instance is published safely to other threads.
     */
    protected static FileTaskHandler getSingleton() {
        if (fileTaskHandler == null) {
            synchronized (FileTaskHandler.class) {
                if (fileTaskHandler == null) {
                    fileTaskHandler = new FileTaskHandler();
                }
            }
        }
        return fileTaskHandler;
    }

    /**
     * Creates a file task via the synchronous HTTP API.
     *
     * @param fileTaskCreateDto task creation parameters
     * @return the new task id, read from the {@code dataInfo} field of the response body
     */
    @Override
    public Long createTask(FileTaskCreateDto fileTaskCreateDto) {
        log.info("创建文件任务参数：{}", fileTaskCreateDto);
        ResponseEntity<JSONObject> result = restTemplate.exchange(
                ApiConstants.createFileTask(), HttpMethod.POST, jsonEntity(fileTaskCreateDto), JSONObject.class);
        log.info("创建文件任务返回值：{}", result.getBody());
        // NOTE(review): a body-less response still fails with an NPE here, as before.
        return result.getBody().getLong("dataInfo");
    }

    /**
     * Refreshes task progress via the synchronous HTTP API.
     *
     * @param refreshProcessDto progress payload
     */
    @Override
    public void refreshProcess(RefreshProcessDto refreshProcessDto) {
        log.info("更新文件任务参数：{}", refreshProcessDto);
        ResponseEntity<JSONObject> result = restTemplate.exchange(
                ApiConstants.refreshProcess(), HttpMethod.PUT, jsonEntity(refreshProcessDto), JSONObject.class);
        log.info("更新文任务返回值：{}", result.getBody());
    }

    /**
     * Refreshes task progress asynchronously via Kafka; falls back to the
     * synchronous HTTP call when the serialized record exceeds the broker's
     * size limit.
     *
     * @param refreshProcessDto progress payload
     * @throws ExecutionException   if the send fails for any reason other than record size
     * @throws InterruptedException if interrupted while waiting for the send result
     */
    @Override
    public void refreshProcessAsync(RefreshProcessDto refreshProcessDto) throws ExecutionException, InterruptedException {
        try {
            ProducerRecord<String, String> producerRecord = new ProducerRecord<>(
                    Topics.TASK_PROCESS, calcuPartition(refreshProcessDto.getTaskId()), null,
                    JSON.toJSONString(refreshProcessDto));
            RecordMetadata metadata = producer.send(producerRecord).get();
            log.info("refreshProcessAsync发送消息结果：topic-{}|partition-{}|offset-{}",
                    metadata.topic(), metadata.partition(), metadata.offset());
        } catch (ExecutionException e) {
            // Inspect the wrapped cause directly rather than matching on message text.
            if (e.getCause() instanceof RecordTooLargeException) {
                log.info("文件过大：改成同步接口调用");
                refreshProcess(refreshProcessDto);
            } else {
                throw e;
            }
        }
    }

    /**
     * Marks a file task as failed via the synchronous HTTP API.
     *
     * @param fileTaskFailedDto failure payload
     */
    @Override
    public void failed(FileTaskFailedDto fileTaskFailedDto) {
        log.info("failed文件任务参数：{}", fileTaskFailedDto);
        ResponseEntity<JSONObject> result = restTemplate.exchange(
                ApiConstants.taskFailed(), HttpMethod.PUT, jsonEntity(fileTaskFailedDto), JSONObject.class);
        log.info("failed文任务返回值：{}", result.getBody());
    }

    /**
     * Marks a file task as failed asynchronously via Kafka; falls back to the
     * synchronous HTTP call when the serialized record exceeds the broker's
     * size limit.
     *
     * @param fileTaskFailedDto failure payload
     * @throws ExecutionException   if the send fails for any reason other than record size
     * @throws InterruptedException if interrupted while waiting for the send result
     */
    @Override
    public void failedAsync(FileTaskFailedDto fileTaskFailedDto) throws ExecutionException, InterruptedException {
        try {
            ProducerRecord<String, String> producerRecord = new ProducerRecord<>(
                    Topics.FAILED_FILE_TASK, calcuPartition(fileTaskFailedDto.getTaskId()), null,
                    JSON.toJSONString(fileTaskFailedDto));
            RecordMetadata metadata = producer.send(producerRecord).get();
            // Fixed copy-paste defect: this previously logged "refreshProcessAsync".
            log.info("failedAsync发送消息结果：topic-{}|partition-{}|offset-{}",
                    metadata.topic(), metadata.partition(), metadata.offset());
        } catch (ExecutionException e) {
            // Inspect the wrapped cause directly rather than matching on message text.
            if (e.getCause() instanceof RecordTooLargeException) {
                log.info("文件过大：改成同步接口调用");
                failed(fileTaskFailedDto);
            } else {
                throw e;
            }
        }
    }

    /**
     * Fetches the detail of a file task by id via the synchronous HTTP API.
     *
     * @param id task id
     * @return the task detail, deserialized from the JSON response body
     */
    @Override
    public FileTaskLogDetailVo detail(Long id) {
        HttpEntity<Void> requestEntity = new HttpEntity<>(jsonHeaders());
        ResponseEntity<JSONObject> result = restTemplate.exchange(
                ApiConstants.getFileTask(id), HttpMethod.GET, requestEntity, JSONObject.class);
        log.info("查询文件任务返回值：{}", result);
        return JSON.parseObject(String.valueOf(result.getBody()), FileTaskLogDetailVo.class);
    }

    /** Builds the JSON content-type headers shared by every HTTP call in this class. */
    private static HttpHeaders jsonHeaders() {
        HttpHeaders headers = new HttpHeaders();
        headers.add("Content-Type", "application/json");
        return headers;
    }

    /** Wraps a request body together with JSON content-type headers. */
    private static <T> HttpEntity<T> jsonEntity(T body) {
        return new HttpEntity<>(body, jsonHeaders());
    }

    /**
     * Maps a task id onto one of the {@value #PARTITION_COUNT} topic partitions.
     * Uses {@link Math#floorMod(long, long)} so a (theoretical) negative id can
     * never yield a negative partition index.
     */
    private int calcuPartition(Long taskId) {
        return (int) Math.floorMod(taskId, (long) PARTITION_COUNT);
    }
}
