package org.kumas.bigdata.hdfs3.ha;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.kumas.bigdata.hdfs3.checkpoint.SnapShot;
import org.kumas.bigdata.hdfs3.constant.Constant;
import org.kumas.bigdata.hdfs3.protocol.FileSystemProtocol;
import org.kumas.bigdata.hdfs3.rpc.FileSystemProtocolImpl;
import org.kumas.bigdata.hdfs3.protocol.HAServiceProtocol;
import org.kumas.bigdata.hdfs3.rpc.HAServiceProtocolImpl;

import java.io.IOException;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/*************************************************
 * Author: KumasZhang
 * DateTime: 2021-12-05 9:49
 * Description: Entry point for the HA filesystem master. Starts the client-facing
 * RPC service and the HA heartbeat service, then schedules periodic snapshots.
 **/
public class FSMaster {

    /** Initial delay before the first snapshot run, in milliseconds. */
    private static final long SNAPSHOT_INITIAL_DELAY_MS = 1000L;
    /** Interval between snapshot runs, in milliseconds. */
    private static final long SNAPSHOT_PERIOD_MS = 2000L;

    /**
     * Boots the master: starts the client-facing filesystem RPC server and the
     * HA-service RPC server, then schedules the periodic {@link SnapShot} task.
     * Exits the JVM with a non-zero status if either server fails to start.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        // One Configuration is sufficient for both servers; RPC.Builder does not
        // mutate it in a way that prevents sharing.
        Configuration conf = new Configuration();
        try {
            // Client-facing filesystem protocol endpoint.
            RPC.Server service = new RPC.Builder(conf)
                    .setProtocol(FileSystemProtocol.class)
                    .setInstance(new FileSystemProtocolImpl())
                    .setBindAddress(Constant.RPC_SERVER_ADDR)
                    .setPort(Constant.RPC_SERVER_SERVICE_PORT)
                    .build();

            // HA heartbeat/failover protocol endpoint.
            RPC.Server haService = new RPC.Builder(conf)
                    .setProtocol(HAServiceProtocol.class)
                    .setInstance(new HAServiceProtocolImpl())
                    .setBindAddress(Constant.RPC_SERVER_ADDR)
                    .setPort(Constant.RPC_SERVER_HA_PORT)
                    .build();

            service.start();
            haService.start();

            // Periodic snapshot task: first run after 1s, then every 2s.
            ScheduledThreadPoolExecutor exec = new ScheduledThreadPoolExecutor(2);
            exec.scheduleAtFixedRate(new SnapShot(),
                    SNAPSHOT_INITIAL_DELAY_MS, SNAPSHOT_PERIOD_MS, TimeUnit.MILLISECONDS);

            // Clean shutdown: stop the scheduler and both RPC servers when the
            // JVM terminates, so in-flight snapshot work is not cut off abruptly.
            Runtime.getRuntime().addShutdownHook(new Thread(() -> {
                exec.shutdown();
                service.stop();
                haService.stop();
            }, "fsmaster-shutdown"));
        } catch (IOException e) {
            // Startup failure must be visible to process supervisors: log the
            // cause and exit with a non-zero status instead of swallowing it.
            e.printStackTrace();
            System.exit(1);
        }
    }
}
