# -*- coding: utf-8 -*-
import os
import sys
from concurrent.futures import ThreadPoolExecutor
import subprocess
import datetime


# Run a shell command, printing start/end timestamps around it.
def run_shell(cmd_str):
    """Execute *cmd_str* through the shell and return its exit status.

    Parameters
    ----------
    cmd_str : str
        The full command line; it is handed to the shell verbatim
        (``shell=True``), so it must come from a trusted source.

    Returns
    -------
    int
        The command's return code as reported by ``subprocess.call``.
    """
    cur_time = datetime.datetime.now()
    print(f"{cur_time}: 开始执行指令：" + cmd_str + '\n')
    # shell=True runs the string via /bin/sh — required here because callers
    # pass compound commands with redirections.
    result = subprocess.call(cmd_str, shell=True)
    cur_time = datetime.datetime.now()
    print(f"{cur_time} 执行结束。\n")
    # Bug fix: the original ended with a bare `result` expression (a no-op
    # statement), silently discarding the exit status. Return it so callers
    # can detect failure.
    return result


# Kick off the pyspark submit script that loads a file into a Hive table.
def file_parse_to_hive_table(file_name):
    """Launch ``shell/qm_pyspark_submit.sh`` for *file_name* via run_shell.

    Builds a ``nohup sh ...`` command rooted at the current working
    directory and redirects the script's stdout to
    ``logs/<file_name>_log.log``, then executes it.

    Parameters
    ----------
    file_name : str
        Name of the file to be parsed into a Hive table; also used to
        name the log file.
    """
    cur_dir = os.getcwd()
    # NOTE(review): only stdout is redirected — stderr still goes to the
    # caller's terminal; and with no trailing '&' the call blocks until the
    # script finishes despite `nohup`. Confirm both are intended.
    cmd = f'nohup sh  {cur_dir}/shell/qm_pyspark_submit.sh {file_name} > {cur_dir}/logs/{file_name}_log.log'
    run_shell(cmd)


