package cn.org.intelli.zjgflink.util;


import org.apache.flink.api.common.JobID;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.client.program.ClusterClientProvider;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.execution.SavepointFormatType;
import org.apache.flink.runtime.client.JobStatusMessage;
import org.apache.flink.yarn.YarnClientYarnClusterInformationRetriever;
import org.apache.flink.yarn.YarnClusterClientFactory;
import org.apache.flink.yarn.YarnClusterDescriptor;
import org.apache.flink.yarn.YarnClusterInformationRetriever;
import org.apache.flink.yarn.configuration.YarnConfigOptions;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import java.util.Collection;
import java.util.Iterator;
import java.util.concurrent.CompletableFuture;

@Component
public class FlinkJobStop {
    private static final Logger log = LoggerFactory.getLogger(FlinkJobStop.class);

    /** Base HDFS directory under which savepoints are written (property {@code flink.hdfs.job}). */
    @Value("${flink.hdfs.job}")
    private String flinkHdfsJob;

    /**
     * Stops the Flink job running inside the given YARN application by triggering a
     * cancel-with-savepoint, then kills the YARN application itself.
     *
     * <p>The savepoint is written to {@code <flinkHdfsJob>/<flinkJobId>}.
     *
     * @param appId      YARN application id string (e.g. {@code application_1234_0001})
     * @param flinkJobId logical job identifier; used as the savepoint sub-directory name
     * @return the savepoint path reported by Flink
     * @throws Exception if no running job is found on the application or cancellation fails
     */
    public String stopFlinkJob(String appId, String flinkJobId) throws Exception {
        log.info("@FlinkJobStop: stop flink job: Yarn applicationId is {}, flinkJobId is {}", appId, flinkJobId);
        System.setProperty("HADOOP_USER_NAME", "root");
        YarnClient yarnClient = YarnClient.createYarnClient();
        YarnConfiguration yarnConfiguration = new YarnConfiguration();
        // A YarnClient must be init()'d before start(); the init call had been
        // commented out, which leaves the client in an invalid service state.
        yarnClient.init(yarnConfiguration);
        yarnClient.start();
        try {
            YarnClientYarnClusterInformationRetriever retriever =
                    YarnClientYarnClusterInformationRetriever.create(yarnClient);
            Configuration flinkConfiguration = new Configuration();
            flinkConfiguration.set(YarnConfigOptions.APPLICATION_ID, appId);
            YarnClusterClientFactory clusterClientFactory = new YarnClusterClientFactory();
            ApplicationId applicationId = clusterClientFactory.getClusterId(flinkConfiguration);
            // Descriptor and cluster client are AutoCloseable and were previously leaked;
            // sharedYarnClient=true means closing the descriptor does not stop yarnClient.
            try (YarnClusterDescriptor yarnClusterDescriptor = new YarnClusterDescriptor(
                    flinkConfiguration, yarnConfiguration, yarnClient, retriever, true)) {
                ClusterClientProvider<ApplicationId> clusterClientProvider =
                        yarnClusterDescriptor.retrieve(applicationId);
                try (ClusterClient<ApplicationId> clusterClient = clusterClientProvider.getClusterClient()) {
                    Collection<JobStatusMessage> jobStatusMessages = clusterClient.listJobs().get();
                    log.info("@FlinkJobStop: found {} job(s) on application {}", jobStatusMessages.size(), appId);
                    Iterator<JobStatusMessage> iterator = jobStatusMessages.iterator();
                    if (!iterator.hasNext()) {
                        // Previously jobId stayed null here and cancelWithSavepoint threw an opaque NPE.
                        throw new IllegalStateException(
                                "No running Flink job found on YARN application " + appId);
                    }
                    JobID jobId = iterator.next().getJobId();
                    CompletableFuture<String> completableFuture = clusterClient.cancelWithSavepoint(
                            jobId, this.flinkHdfsJob + "/" + flinkJobId, SavepointFormatType.DEFAULT);
                    String savePoint = completableFuture.get();
                    yarnClusterDescriptor.killCluster(applicationId);
                    log.info("@FlinkJobStop: stop flink job {} successful, savePoint is {}",
                            flinkJobId, savePoint);
                    return savePoint;
                }
            }
        } catch (Exception e) {
            // Log with full stack trace via SLF4J instead of printStackTrace(),
            // and rethrow the original exception so callers keep the real type/cause.
            log.error("@FlinkJobStop: failed to stop flink job {} on application {}", flinkJobId, appId, e);
            throw e;
        } finally {
            yarnClient.stop();
        }
    }
}
