# -*- coding: UTF-8 -*-

"""
这个模块主要是运行的实际视图，实际视图由Task和Data交错构成，第一个节点和最后一个节点必须是data节点
"""
import DCLib.Tool
import DCLib.Config
import datetime
import os
import random
from hdfs import InsecureClient


class BasicNode:
    """Common base for every node in the run graph; tags the node category."""

    def __init__(self, basic_type: str) -> None:
        # Either "task" or "data" in this module.
        self.basic_type = basic_type


# 函数提供输入data，输出data，以及要执行的内容类型，这是一个基类。
# 可能有多个或者单个输出
# A task consumes one input data node and produces output data; it also
# carries the kind of work to perform. This is a base class — concrete
# subclasses implement the actual execution.
class TaskNode(BasicNode):
    def __init__(self, task_type: str) -> None:
        super().__init__("task")
        self.input_data = None
        self.output_data = None
        self.task_type = task_type
        # Give the task a unique-ish name: random number plus a timestamp.
        stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        self.task_name = "task_" + str(random.randint(1, 99999999999999999999)) + stamp

    def set_input_data(self, input_data):
        self.input_data = input_data

    def set_out_data(self, output_data):
        self.output_data = output_data


# 一个数据，数据只有可能来自于单个Task的运行结果，但是可以去往多个Task
# 以及这个数据如果存储
# A piece of data: it can only come from a single task's run result, but may
# feed any number of downstream tasks. Subclasses decide how it is stored.
class DataNode(BasicNode):
    def __init__(self, data_type: str) -> None:
        super().__init__("data")
        self.data_type = data_type
        self.task_from = None   # producing task (at most one)
        self.task_to_arr = []   # consuming tasks (zero or more)

    def set_task_from(self, task_from: TaskNode):
        self.task_from = task_from

    def set_task_to_arr(self, task_to_arr):
        self.task_to_arr = task_to_arr

    def append_task_to_arr(self, task_to: TaskNode):
        self.task_to_arr.append(task_to)


# 本地DataNode，存在磁盘里面，使用一个url初始化
class LocalDataNode(DataNode):
    def __init__(self, file_name: str) -> None:
        super().__init__("local_disk_data")
        # 用一个文件初始化，产生一个string
        # 将文件读出来，然后初始化
        # 如果filename是空的，那就只创建文件
        if file_name == "":
            self.local_file_name = DCLib.Config.TMP_DIR + str(random.randint(1, 99999999999999999999)) \
                                   + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
            open(self.local_file_name, "w").write("")
        else:
            map_str = DCLib.Tool.get_local_map_from_url(file_name)
            # print(map_str)
            self.local_file_name = DCLib.Config.TMP_DIR + str(abs(hash(map_str))) \
                                   + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
            open(self.local_file_name, "w").write(map_str)

    def print_obj(self):
        print("{" + self.data_type + "," + self.local_file_name + "}")


class HadoopMrTaskNode(TaskNode):
    """Runs a Hadoop-streaming map/reduce job inside the k8s Hadoop environment."""

    def __init__(self, map_task_script: str, reduce_task_script: str) -> None:
        super().__init__("hadoop_mr_task")
        self.map_task_script = map_task_script
        # An empty string means "map-only job, no reducer".
        self.reduce_task_script = reduce_task_script
        # Bring up the default Hadoop environment before any run() call.
        DCLib.Tool.start_hadoop_env()
        # Default master pod of that environment.
        self.master_pod_name = DCLib.Config.MR_MASTER_POD_DEFAULT_NAME
        # Default HDFS address and port of that environment.
        self.hdfs_ip = DCLib.Config.HDFS_DEFAULT_IP
        self.hdfs_port = DCLib.Config.HDFS_DEFAULT_PORT

    def run(self, recur: bool = False):
        """Stage input into HDFS, ship the scripts into the master pod and run
        the streaming job; when *recur* is set, run downstream tasks too."""
        # Dispatch on where the input data lives.
        if self.input_data.data_type == "k8s_hdfs_data":
            print("TODO: input in hdfs is not supported")
            DCLib.Tool.free_cache()
        elif self.input_data.data_type == "local_disk_data":
            # Stage the input: create a per-task cache directory in HDFS ...
            client = InsecureClient(url="http://" + self.hdfs_ip + ":" + self.hdfs_port, user='root', root="/")
            task_cache_dir = DCLib.Config.DEFAULT_HDFS_TMP + self.task_name
            client.makedirs(task_cache_dir)
            # ... and copy the local input file into it.
            DCLib.Tool.ugly_move_local_data_to_hdfs(DCLib.Config.HDFS_MASTER_POD_DEFAULT_NAME,
                                                    self.input_data.local_file_name, task_cache_dir)
            # Ship the scripts into the master pod. A mapper is mandatory.
            if self.map_task_script is None:
                print("HadoopMrTaskNode.run: map script can not be None")
                exit(-1)
            DCLib.Tool.copy_local_file_to_pod(self.map_task_script, self.master_pod_name, "/")
            if self.reduce_task_script != "":
                DCLib.Tool.copy_local_file_to_pod(self.reduce_task_script, self.master_pod_name, "/")

            # The output must be an HDFS data node: streaming writes a directory.
            if self.output_data.data_type == "k8s_hdfs_data":
                MR_output_dir = self.output_data.hdfs_file_dir
            else:
                print("HadoopMrTaskNode.run: output type is not k8s_hdfs_data")
                exit(-1)

            # Basename (last path component) is the name the pod sees at "/".
            map_file_name = self.map_task_script.split("/")[-1]

            # Build the hadoop-streaming command executed inside the master pod.
            command = "kubectl exec " + self.master_pod_name + " -- hadoop jar " + DCLib.Config.STREAMING_JAR_LOCATION_IN_CONRAINER \
                      + " -D stream.non.zero.exit.is.failure=false " + " -input " + task_cache_dir + " -output " \
                      + MR_output_dir + " -mapper \"python3 " + map_file_name + "\""

            if self.reduce_task_script != "":
                reduce_file_name = self.reduce_task_script.split("/")[-1]
                command = command + " -reducer \"python3 " + reduce_file_name + "\""
                # Ship both scripts with the job when a reducer is present.
                command = command + " -file /" + map_file_name + " -file /" + reduce_file_name
            else:
                command = command + " -file /" + map_file_name

            os.system(command)

            # Fix: honour *recur* like the other task nodes do (the parameter
            # was previously accepted but silently ignored here).
            if recur:
                for task_item in self.output_data.task_to_arr:
                    task_item.print_obj()
                    task_item.run(True)
        else:
            # Fix: unsupported input types previously fell through silently;
            # fail loudly, consistent with the other task nodes.
            print("datanode is not support this input in HadoopMrTaskNode")
            exit(-1)

    def print_obj(self):
        print("{" + self.map_task_script + "," + self.reduce_task_script + "," + self.master_pod_name + "}")


class HDFSDataNode(DataNode):
    """Data node living in HDFS; initialised with the cluster's ip and port."""

    def __init__(self, hdfs_ip: str, hdfs_port: str):
        super().__init__("k8s_hdfs_data")
        # Network location of the HDFS service.
        self.hdfs_ip = hdfs_ip
        self.hdfs_port = hdfs_port
        # The data node is a directory holding every partition of one file;
        # pick a random, timestamped path under the HDFS temp root.
        suffix = str(random.randint(1, 99999999999999999999))
        suffix += datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        self.hdfs_file_dir = DCLib.Config.DEFAULT_HDFS_TMP + suffix
        # NOTE: the directory is deliberately NOT created here — an MR job's
        # output directory must not exist before the job runs.

    def print_obj(self):
        print("{" + self.hdfs_ip + "," + self.hdfs_port + "," + self.hdfs_file_dir + "}")


# 本地执行mapreduce
# Emulates map/reduce locally with a shell pipeline (cat | map | sort | reduce).
class LocalMapReduceNode(TaskNode):
    def __init__(self, map_task_script: str, reduce_task_script: str) -> None:
        super().__init__("hadoop_mr_task")
        self.map_task_script = map_task_script
        self.reduce_task_script = reduce_task_script

    def run(self, recur: bool = False):
        # Only local-disk input is supported; anything else is a hard error.
        # (A cross-platform copy step could be inserted here later.)
        if self.input_data.data_type != "local_disk_data":
            print("datanode is not support this input in LocalMapReduceNode")
            exit(-1)

        src = self.input_data.local_file_name
        if self.output_data.data_type != "local_disk_data":
            print("output location error")
            exit(-1)
        dst = self.output_data.local_file_name

        # Build the pipeline incrementally; an empty reduce script means map-only.
        pipeline = "cat " + src + " | python " + self.map_task_script + " | sort -t ' ' -k 1"
        if self.reduce_task_script == "":
            pipeline += " | cat " + " > " + dst
        else:
            pipeline += " | python " + self.reduce_task_script + " > " + dst
        os.system(pipeline)

        if recur:
            # Trigger every downstream task of the output data node.
            for nxt in self.output_data.task_to_arr:
                nxt.print_obj()
                nxt.run(True)

    def print_obj(self):
        print("{" + self.task_type + "," + self.map_task_script + "," + self.reduce_task_script + "}")


# 本地Task，包括要执行的函数
class LocalTaskNode(TaskNode):
    def __init__(self, seq_task_script) -> None:
        super().__init__("local_seq_task")
        self.seq_task_script = seq_task_script

    # 这个seq是一个本地的python脚本，脚本以一个文件进行输入，输出也是一个文件，这个python脚本接收一个参数，就是要处理的文件名称
    def run(self, recur: bool = False):
        # 要运行一个本地脚本，根据前面的一个DataNode获取文件，然后根据后一个datanode决定输出
        # 首先查看前后data_node的位置
        if self.input_data.data_type == "local_disk_data":
            local_input_file_name = self.input_data.local_file_name
            if self.output_data.data_type != "local_disk_data":
                print("output location error")
                exit(-1)
            local_output_file_name = self.output_data.local_file_name
            os.system("python " + self.seq_task_script + " " + local_input_file_name + " " + local_output_file_name)

        else:
            print("datanode is not support this input in LocalTaskNode")
            # 这里可以执行不同平台时间的拷贝工作
            exit(-1)

        if recur:
            # 遍历执行所有的task
            for task_item in self.output_data.task_to_arr:
                task_item.print_obj()
                task_item.run(True)

    def print_obj(self):
        print("{" + self.task_type + "," + self.seq_task_script + "}")
