package cn.com.greatwall.kafka.service.impl;

import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import javax.servlet.http.HttpServletResponse;

import org.apache.kafka.clients.admin.DescribeClusterResult;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.cache.annotation.Cacheable;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Sort;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import cn.com.greatwall.common.utils.FileUtil;
import cn.com.greatwall.common.utils.PageUtil;
import cn.com.greatwall.common.utils.QueryHelp;
import cn.com.greatwall.common.utils.ValidationUtil;
import cn.com.greatwall.kafka.domain.Cluster;
import cn.com.greatwall.kafka.repository.ClusterRepository;
import cn.com.greatwall.kafka.service.ClusterService;
import cn.com.greatwall.kafka.service.KafkaService;
import cn.com.greatwall.kafka.service.dto.ClusterDto;
import cn.com.greatwall.kafka.service.dto.criteria.ClusterQueryCriteria;
import cn.com.greatwall.kafka.service.mapstruct.ClusterMapper;
import cn.com.greatwall.system.modules.system.domain.Job;
import cn.com.greatwall.system.modules.system.service.dto.JobDto;
import lombok.RequiredArgsConstructor;

/**
 * Service implementation for Kafka cluster management: list/page queries,
 * Excel export, CRUD operations, and a broker-count based health check.
 *
 * @Author herw
 * @Time 2021-01-13 08:54:52
 * @Version 1.0
 */
@Service
@RequiredArgsConstructor
public class ClusterServiceImpl implements ClusterService {
    private final ClusterRepository clusterRepository;
    private final ClusterMapper clusterMapper;
    // Constructor-injected via Lombok's @RequiredArgsConstructor, consistent with the
    // other dependencies (was @Autowired field injection, which mixes two styles and
    // prevents the field from being final).
    private final KafkaService kafkaService;

    /**
     * Returns all clusters, sorted by name in ascending order.
     *
     * @return all clusters as DTOs
     */
    @Override
    public List<ClusterDto> queryAll() {
        Sort sort = Sort.by(Sort.Direction.ASC, "name");
        return clusterMapper.toDto(clusterRepository.findAll(sort));
    }

    /**
     * Exports the given clusters as an Excel download written to the HTTP response.
     *
     * @param clusterDtos clusters to export
     * @param response    servlet response the Excel file is streamed to
     * @throws IOException if writing the download fails
     */
    @Override
    public void download(List<ClusterDto> clusterDtos, HttpServletResponse response) throws IOException {
        // Presize to avoid incremental growth; LinkedHashMap preserves column order
        // in the exported sheet.
        List<Map<String, Object>> list = new ArrayList<>(clusterDtos.size());
        for (ClusterDto clusterDTO : clusterDtos) {
            Map<String, Object> map = new LinkedHashMap<>();
            map.put("集群名称", clusterDTO.getName());
            map.put("ZooKeeper", clusterDTO.getZkAddress());
            map.put("Kafka", clusterDTO.getBroker());
            map.put("brokerSize", clusterDTO.getBrokerSize());
            map.put("Kafka版本", clusterDTO.getKafkaVersion());
            map.put("enable", clusterDTO.getEnable() ? "启用" : "禁用");
            map.put("location", clusterDTO.getLocation());
            map.put("备注", clusterDTO.getComments());
            map.put("创建日期", clusterDTO.getCreateTime());
            list.add(map);
        }
        FileUtil.downloadExcel(list, response);
    }

    /**
     * Returns one page of clusters matching the given criteria.
     *
     * @param criteria query filter
     * @param pageable paging/sorting parameters
     * @return page payload (content + totals) as produced by {@link PageUtil#toPage}
     */
    @Override
    public Map<String, Object> queryAll(ClusterQueryCriteria criteria, Pageable pageable) {
        Page<Cluster> page = clusterRepository.findAll(
                (root, criteriaQuery, criteriaBuilder) -> QueryHelp.getPredicate(root, criteria, criteriaBuilder),
                pageable);
        return PageUtil.toPage(page.map(clusterMapper::toDto));
    }

    /**
     * Returns all clusters matching the given criteria, unpaged.
     *
     * @param criteria query filter
     * @return matching clusters as DTOs
     */
    @Override
    public List<ClusterDto> queryAll(ClusterQueryCriteria criteria) {
        return clusterMapper.toDto(clusterRepository.findAll(
                (root, criteriaQuery, criteriaBuilder) -> QueryHelp.getPredicate(root, criteria, criteriaBuilder)));
    }

    /**
     * Looks up a cluster by id.
     *
     * @param id cluster id
     * @return the cluster as a DTO
     *         (ValidationUtil.isNull raises an error when no cluster exists for the id)
     */
    @Override
    public ClusterDto findById(long id) {
        Cluster cluster = clusterRepository.findById(id).orElseGet(Cluster::new);
        // A freshly-created Cluster has a null id, which signals "not found" here.
        ValidationUtil.isNull(cluster.getId(), "Cluster", "id", id);
        return clusterMapper.toDto(cluster);
    }

    /**
     * Persists a new cluster.
     *
     * @param resources cluster to create
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public void create(Cluster resources) {
        clusterRepository.save(resources);
    }

    /**
     * Updates an existing cluster. Fails validation when the id does not exist.
     *
     * @param resources cluster state to save; its id selects the target row
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public void update(Cluster resources) {
        Cluster cluster = clusterRepository.findById(resources.getId()).orElseGet(Cluster::new);
        ValidationUtil.isNull(cluster.getId(), "Cluster", "id", resources.getId());
        resources.setId(cluster.getId());
        clusterRepository.save(resources);
    }

    /**
     * Deletes all clusters whose ids are in the given set.
     *
     * @param ids ids of the clusters to delete
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public void delete(Set<Long> ids) {
        clusterRepository.deleteAllByIdIn(ids);
    }

    /**
     * Probes the cluster via the Kafka admin client and reports whether it is healthy.
     * <p>
     * A cluster with no reachable brokers is down. A disabled cluster counts as healthy
     * as long as any broker responds; an enabled cluster is healthy only when the number
     * of live brokers equals the configured broker size (more OR fewer than configured
     * both report unhealthy, matching the original conditional chain).
     *
     * @param resources cluster to probe (id, enable flag, configured broker size)
     * @return {@code true} when the cluster is considered healthy; {@code false} on any
     *         connection/lookup failure
     */
    @Override
    public boolean getClusterStatus(Cluster resources) {
        try {
            DescribeClusterResult describeClusterResult = kafkaService.getKafkaUtil(resources.getId().toString())
                    .describeClusterResult();
            int brokers = describeClusterResult.nodes().get().size();
            if (brokers == 0) {
                return false;
            }
            // Same truth table as the original if/else chain:
            //   disabled            -> true
            //   enabled, size == n  -> true
            //   enabled, size != n  -> false
            return !resources.getEnable() || resources.getBrokerSize() == brokers;
        } catch (InterruptedException e) {
            // Restore the interrupt flag instead of silently swallowing it.
            Thread.currentThread().interrupt();
            return false;
        } catch (Exception e) {
            // Unreachable or misconfigured cluster: report down rather than propagate.
            return false;
        }
    }
}
