package t20250313_setparallelism;

import com.alibaba.fastjson.JSONObject;
import org.apache.flink.client.program.PackagedProgram;
import org.apache.flink.client.program.PackagedProgramUtils;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.JobGraphUtils;
import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.jobgraph.jsonplan.JsonPlanGenerator;
import org.apache.flink.streaming.api.graph.StreamEdge;
import org.apache.flink.streaming.api.graph.StreamGraph;
import org.apache.flink.streaming.api.graph.StreamNode;

import java.io.File;
import java.util.HashMap;
import java.util.Map;

/**
 * Experiment: changing per-operator parallelism for a Flink SQL job by editing the
 * {@link StreamGraph} extracted from a packaged program, then observing the effect on
 * the derived {@link JobGraph} (operator chaining, vertex parallelism).
 */
public class Test1 {

    /**
     * Entry point: builds a {@link PackagedProgram} from the sql-engine jar, extracts its
     * StreamGraph, prints the JobGraph vertices before and after rewriting every
     * StreamNode's parallelism, and dumps both graphs' JSON plans.
     *
     * @param args unused; program arguments for the packaged job are hard-coded below
     * @throws Exception if the jar cannot be loaded or the pipeline cannot be extracted
     */
    public static void main(String[] args) throws Exception {

        Configuration configuration = new Configuration();

        // Arguments forwarded to the SqlEngine entry point inside the jar.
        String[] jobArgs = new String[]{"--sql", Sql.SQL};

        // NOTE(review): hard-coded absolute Windows path — consider taking the jar
        // location from args/config so this runs on other machines.
        PackagedProgram packagedProgram = PackagedProgram.newBuilder()
                .setJarFile(new File("D:\\code\\flink-all\\flink_demo\\sql-engine\\target\\sql-engine-1.0-SNAPSHOT.jar"))
                .setEntryPointClassName("SqlEngine")
                .setConfiguration(configuration)
                .setArguments(jobArgs)
                .build();

        // Default parallelism 2, suppressOutput = false. The program's pipeline is a
        // StreamGraph for streaming jobs, hence the cast.
        StreamGraph streamGraph = (StreamGraph) PackagedProgramUtils.getPipelineFromProgram(
                packagedProgram, configuration, 2, false);

        System.out.println("jobGraph modify parallelism before:");
        JobGraph jobGraph = streamGraph.getJobGraph();
        for (JobVertex vertex : jobGraph.getVertices()) {
            System.out.println(vertex.getID() + "  " + vertex.getParallelism());
        }
        printGraph(streamGraph);

        System.out.println("======================== modify parallelism ====================================");
        // Give every stream node a distinct parallelism (1, 2, 3, ...) so the impact on
        // operator chaining in the regenerated JobGraph is easy to spot.
        int parallelism = 1;
        for (StreamNode streamNode : streamGraph.getStreamNodes()) {
            streamNode.setParallelism(parallelism++);
        }
        System.out.println("======================== modify parallelism ====================================");

        // Fixed: this printed "before" even though the parallelism was already modified.
        System.out.println("jobGraph modify parallelism after:");
        jobGraph = streamGraph.getJobGraph();
        for (JobVertex vertex : jobGraph.getVertices()) {
            System.out.println(vertex.getID() + "  " + vertex.getParallelism());
        }
        printGraph(streamGraph);

        /*
         * Findings / plan (translated):
         * 1. Validating the SQL automatically yields a StreamGraph; it can be extracted
         *    via the flink-client utility classes.
         * 2. StreamGraph exposes setters for parallelism; the modified graph (sg1) can be
         *    serialized to JSON and stored.
         * 3. A JobGraph is derived from sg1 and shown in the UI.
         * 4. The UI's "change parallelism" button operates on the JobGraph the user sees,
         *    but the actual edit targets the StreamGraph — the mapping must be resolved.
         *    The JobGraph applies operator chaining; chained operators are joined with
         *    "->" in the JobGraph vertex description field.
         * 5. Besides parallelism, the partitioning (ship_strategy) may need updating,
         *    e.g. FORWARD must become REBALANCE when upstream/downstream parallelism
         *    no longer match.
         * 6. Persist the updated StreamGraph, derive a fresh JobGraph, resubmit the job
         *    and refresh the UI.
         */
    }

    /**
     * Cross-references StreamGraph node contents with JobGraph vertex descriptions to
     * show how stream nodes map onto (possibly chained) job vertices, keyed by the hash
     * of the trimmed description text.
     *
     * @param streamGraph the graph whose plans are compared
     */
    private static void printGraphDesc(StreamGraph streamGraph) {

        // contents-hash -> "streamParallelism[->jobParallelism...]"
        HashMap<Integer, String> pas = new HashMap<>();

        System.out.println("================================= streamGraph ================================");
        String streamingPlanAsJSON = streamGraph.getStreamingPlanAsJSON();
        JSONObject obj = JSONObject.parseObject(streamingPlanAsJSON);
        for (Object nodes : obj.getJSONArray("nodes")) {
            JSONObject node = (JSONObject) nodes;
            String contents = node.getString("contents").trim();
            System.out.println("contents:" + contents);
            pas.put(contents.hashCode(), node.getInteger("parallelism") + "");
        }

        System.out.println("=================================  jobGraph  ================================");
        String jobGraphJson = JsonPlanGenerator.generatePlan(streamGraph.getJobGraph());
        obj = JSONObject.parseObject(jobGraphJson);
        for (Object nodes : obj.getJSONArray("nodes")) {
            JSONObject node = (JSONObject) nodes;
            String description = node.getString("description");
            String parallelism = node.getString("parallelism");
            // Chained operators are joined with "->", which the JSON plan generator
            // HTML-escapes to "-&gt;" — NOTE(review): verify against the Flink version
            // in use before changing this delimiter.
            String[] split = description.split("-&gt;");
            System.out.println("node:" + split.length);
            for (String s : split) {
                System.out.println("  description:" + s.trim());

                // Append the job-vertex parallelism to the stream-node entry (if any).
                int contentsHash = s.trim().hashCode();
                String s1 = pas.getOrDefault(contentsHash, "NULL");
                pas.put(contentsHash, s1 + "->" + parallelism);
            }
        }

        for (Map.Entry<Integer, String> entry : pas.entrySet()) {
            System.out.println("contentsHash:" + entry.getKey() + "  " + entry.getValue());
        }
    }

    /**
     * Dumps the raw JSON plans of both the StreamGraph and the JobGraph derived from it.
     *
     * @param streamGraph the graph to print
     */
    private static void printGraph(StreamGraph streamGraph) {
        System.out.println("================================= begin ================================");
        String streamingPlanAsJSON = streamGraph.getStreamingPlanAsJSON();
        System.out.println("streamGraph:" );
        System.out.println(streamingPlanAsJSON);

        String jobGraphJson = JsonPlanGenerator.generatePlan(streamGraph.getJobGraph());
        System.out.println("jobGraphJson:" );
        System.out.println(jobGraphJson);
        System.out.println("=================================  end  ================================");
    }

}
