# coding=utf-8
import json
import os
import re
import sys
import boto3
from smart_open import open
import subprocess

# Make the directory three levels above this file importable when the script
# is run directly (presumably the project root — confirm against repo layout).
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))


class Operation_s3():
    """Thin wrapper around a boto3 S3 client: path parsing and prefix listing."""

    def __init__(self):
        # NOTE(review): this constructor previously carried commented-out,
        # hard-coded AWS credentials. Removed — never keep secrets in source;
        # rely on the default credential chain (env vars / instance profile).
        self.s3_client = boto3.client('s3')

    def split_path(self, path):
        """Split an ``s3://bucket/key`` URL into ``(bucket, key)``."""
        start = path.index("//")
        bucket = path[start + 2:].split("/")[0]
        # Compute the key offset from the bucket's known position rather than
        # path.index(bucket), which could match an earlier occurrence of the
        # bucket string elsewhere in the URL and truncate the key wrongly.
        key = path[start + 2 + len(bucket) + 1:]
        return bucket, key

    def concat_path(self, bu, object):
        """Join a bucket and an object key back into an ``s3://`` URL."""
        return "s3://" + bu + "/" + object

    def ls(self, bucket_and_path):
        """List every object under an ``s3://bucket/prefix`` path.

        Returns a list of dicts with keys 'bucket', 'object' (the full key)
        and 'LastModified' (a datetime from S3).
        """
        parts = bucket_and_path.split('/')
        bucket, prefix = parts[2], '/'.join(parts[3:])
        if not prefix.endswith('/'):
            prefix += '/'
        # list_objects_v2 is the recommended listing API; the paginator
        # transparently handles results beyond the 1000-key page limit.
        paginator = self.s3_client.get_paginator('list_objects_v2')
        page_iterator = paginator.paginate(Bucket=bucket, Prefix=prefix)
        rs = []
        for response in page_iterator:
            for content in response.get('Contents', []):
                rs.append({'bucket': bucket, 'object': content['Key'], 'LastModified': content['LastModified']})
        return rs


# S3 prefix of the per-environment job-list config; "<env>.json" is appended.
job_list_path = "s3://streaming/flink/conf/job-list-"
# Root under which each job stores its Flink checkpoints ("<root><job>-<env>").
check_point_root_path = "s3://streaming/flink/checkpoint/"
# Module-level client, created at import time.
s3 = Operation_s3()
# NOTE(review): appears unused in this file ("parth" looks like a typo for
# "path") — confirm no external user before removing.
jar_parth = ""

# Lets smart_open reuse the already-configured boto3 client for s3:// URLs.
transport_params = {'client': s3.s3_client}


def read_job_conf(env):
    """Load the job-launch configuration for *env* from S3.

    Entries whose 'enable' flag equals "false" (case-insensitive) are dropped.
    Raises if no enabled job remains.
    """
    path = f"{job_list_path}{env}.json"
    print(f"read job list conf {path}")
    with open(path, 'r', transport_params=transport_params) as file:
        raw_conf = json.load(file)
    # Keep only jobs that are not explicitly disabled.
    conf = {
        job: settings
        for job, settings in raw_conf.items()
        if not ("enable" in settings and str(settings["enable"]).lower() == "false")
    }
    if not conf:
        raise Exception(f" job list file {path} is not available")
    return conf


def restart(job_name, params, env, from_ck):
    """Kill any running YARN instance of *job_name*, then start it again.

    Parameters:
        job_name: job identifier; the part before the first '-' is the job class.
        params:   entry from the job-list config ('runparam', optional
                  'fromck' / 'iotconf').
        env:      "test" or "prod".
        from_ck:  "false" disables restoring from the latest checkpoint.
    """
    class_name = job_name.split('-')[0]

    # Find the YARN application id(s) of any currently running instance.
    # NOTE(review): job_name is interpolated into a shell pipeline; it comes
    # from the job-list config, but keep it free of shell metacharacters.
    running_list_command = """ yarn application -list |grep """ + job_name + """ | awk '{print $1}' """
    print(f"get job running list:{running_list_command}")
    rs = subprocess.check_output(running_list_command, shell=True).decode().split("\n")
    print("running app:", rs)

    kill_command = "yarn application -kill "
    # Kill every matching running application before restarting.
    for appid in rs:
        if appid:
            print(f"kill job:{kill_command + appid}")
            kill_rs = subprocess.check_output(kill_command + appid, shell=True)
            print("kill result ", kill_rs)

    checkpoint_path = check_point_root_path + job_name + "-" + env
    # Locate the newest checkpoint for this job ("" when none exists).
    ck = get_lastest_ck(checkpoint_path)

    # Restore from the checkpoint unless disabled by config or argument.
    # Fixes vs. the original condition:
    #   - params.get() instead of params['fromck']: a missing key raised
    #     KeyError, inconsistent with the optional 'iotconf' handling below;
    #   - a JSON boolean false is now honoured (str(False).lower() == "false"),
    #     where previously `not params['fromck']` short-circuited past it.
    fromck_setting = str(params.get('fromck', '')).lower()
    if fromck_setting != "false" and from_ck.lower() != "false" and ck != "":
        run_command = params['runparam'] + f" -s  {ck} "
    else:
        run_command = params['runparam']
    iot_conf = params.get('iotconf', '')
    run_command = f"""{run_command} -D yarn.application.name={job_name} -D state.checkpoints.dir={checkpoint_path} -d -c com.hongmei.iot.streaming.JobEnter -t /etc/hive/conf/hive-site.xml ~/flink-jar/hm-iot-streaming.jar --iot.conf execution.job.class=com.hongmei.iot.streaming.job.{class_name} conf.file=s3://streaming/flink/conf/{env}-application.conf {iot_conf}"""
    print(f"run command:{run_command}")
    run_rs = subprocess.check_output(run_command, shell=True)
    print("running result:", run_rs)
    print(f"start complete {job_name}")


def get_lastest_ck(checkpoint_path):
    """Return the s3:// path of the newest checkpoint under *checkpoint_path*.

    Scans the prefix for objects ending in "_metadata" (which mark completed
    Flink checkpoints) and returns the containing checkpoint directory with
    the "_metadata" suffix stripped, or "" when no checkpoint exists.
    """
    ck_path = checkpoint_path + "/"
    bucket, _ = s3.split_path(ck_path)
    candidates = [entry for entry in s3.ls(ck_path)
                  if str(entry['object']).endswith("_metadata")]
    if not candidates:
        return ""
    # Compare the LastModified datetimes directly; the original compared
    # strftime("%Y-%m-%d %H:%M:%S") strings, discarding sub-second precision
    # and so potentially picking the wrong checkpoint within the same second.
    latest = max(candidates, key=lambda entry: entry['LastModified'])
    return s3.concat_path(bucket, str(latest['object']).replace("_metadata", ""))


def get_env():
    """Determine the environment ("prod" or "test") from the EMR cluster name.

    Reads the local EMR metadata file for the job-flow id, then asks the AWS
    CLI for the cluster's name.

    Raises:
        Exception: if the cluster name is neither streaming-prod nor
            streaming-test.
    """
    # Read the metadata file directly instead of shelling out to `cat`.
    # (`open` here is smart_open.open, which delegates to the builtin for
    # local paths.)
    with open("/mnt/var/lib/info/job-flow.json") as info_file:
        cluster_id = json.load(info_file)['jobFlowId']
    cluster_info = subprocess.check_output(f"aws emr describe-cluster --cluster-id {cluster_id}", shell=True)
    cluster_name = json.loads(cluster_info)['Cluster']['Name']
    if cluster_name == "streaming-prod":
        return "prod"
    if cluster_name == "streaming-test":
        return "test"
    raise Exception(f" emr cluster name error,need streaming-prod or streaming-test")


def run(job_names: str, from_ck: str):
    """Restart the requested job(s) on the current EMR cluster.

    job_names is either a single job name from the config, or "all" to
    restart every enabled job; from_ck "false" skips checkpoint restore.
    """
    env = get_env()                # "test" or "prod"
    job_list = read_job_conf(env)  # enabled jobs only

    if job_names != "all":
        restart(job_names, job_list[job_names], env, from_ck)
        return
    for job_name, job_params in job_list.items():
        restart(job_name, job_params, env, from_ck)


# Arg 1: name of the job to run, or "all" to restart every configured job.
# Arg 2 (optional): "true"/"false" — restore from the latest checkpoint
#   (defaults to "true"). NOTE(review): the original comment wrongly described
#   arg 2 as the environment; the environment is detected from the cluster.
if __name__ == '__main__':
    if len(sys.argv) <= 1:
        print("input param error,need input job name,if start all jobs,type in [all]")
        # Bug fix: previously execution fell through to sys.argv[1] and
        # crashed with IndexError; exit with a non-zero status instead.
        sys.exit(1)
    jobs = sys.argv[1]
    from_ck = sys.argv[2] if len(sys.argv) >= 3 else "true"
    run(jobs, from_ck)
