import datetime
import json
import os

import time
import re
import shutil
import signal
import subprocess
import csv
import yaml

from subprocess import check_output

from job.src.function.record_log import mylog


def parse_file(filepath, flag=2):
    """Parse a markdown file and build a nested table of contents from '#' headings.

    :param filepath: path of the markdown file
    :param flag: heading level treated as the top level (default 2); three
                 consecutive levels (flag, flag+1, flag+2) are collected
    :return: flat list mixing top-level titles and nested lists of sub-titles
    """
    with open(filepath, 'r', encoding='utf-8') as f:
        raw_lines = f.readlines()

    toc = []      # result: top-level titles interleaved with nested sub-lists
    level2 = []   # pending second-level entries of the current section
    level3 = []   # pending third-level entries of the current sub-section

    top_mark = "#" * flag
    mid_mark = "#" * (flag + 1)
    low_mark = "#" * (flag + 2)

    for raw in raw_lines:
        if not re.match(r"#", raw):
            continue
        parts = raw.split(' ')
        marker = parts[0]
        title = " ".join(parts[1:]).rstrip('\n')
        if marker == top_mark:
            # flush pending lower levels before opening a new section
            if level3:
                level2.append(level3)
            if level2:
                toc.append(level2)
            toc.append(title)
            level3 = []
            level2 = []
        elif marker == mid_mark:
            # flush pending third level into the current second level
            if level3:
                level2.append(level3)
            level2.append(title)
            level3 = []
        elif marker == low_mark:
            level3.append(title)
        # any other heading depth is ignored, as before

    # flush the trailing group after the last heading
    if level3:
        level2.append(level3)
    if level2:
        toc.append(level2)
    return toc

def parse_file_two(filepath, flag=2):
    # NOTE(review): unfinished stub — reads the file but discards the result and
    # implicitly returns None; the `flag` parameter is unused. Presumably intended
    # as an alternative to parse_file — confirm before relying on it.
    with open(filepath, 'r', encoding='utf-8') as f:
        contents = f.readlines()

def read_file(filepath, limit=0):
    """Read a UTF-8 text file, optionally keeping only the tail.

    :param filepath: file to read
    :param limit: 0 reads the whole file; otherwise only the last `limit` lines
    :return: file content, or an error-message string when reading fails
    """
    try:
        with open(filepath, 'r', encoding='utf-8') as fh:
            if limit == 0:
                return fh.read()
            lines = fh.readlines()
            tail = min(limit, len(lines))
            return ''.join(lines[-tail:])
    except Exception as e:
        # best-effort contract: never raise, report the failure in the result
        return f'Read file error! Exception is {e}'

def write_file(filepath, content):
    """Write `content` to `filepath` as UTF-8 (newline translation disabled),
    overwriting any existing file. Always returns True."""
    with open(filepath, mode='w', encoding='utf-8', newline='') as fh:
        fh.write(content)
    return True

def write_json(filename, data):
    """Serialize `data` to `filename` as pretty-printed UTF-8 JSON
    (non-ASCII characters kept readable). Always returns True."""
    with open(filename, 'w', encoding='utf-8') as fh:
        json.dump(data, fh, indent=4, ensure_ascii=False)
    return True

def read_json(filename):
    """Load and return the JSON document stored in UTF-8 file `filename`."""
    with open(filename, 'r', encoding='utf-8') as fh:
        return json.load(fh)

def is_dir_exist(path):
    """Ensure `path` exists as a directory tree.

    :param path: directory path to create if missing
    :return: True on success, False on failure (the error is logged)
    """
    try:
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists + makedirs pair (TOCTOU)
        os.makedirs(path, exist_ok=True)
        return True
    except Exception as e:
        mylog.error(e)
        return False

def create_dir(dirList):
    """Create every directory in `dirList`.

    :param dirList: iterable of directory paths
    :return: True only if every directory exists/was created; False when the
             list is empty or any creation failed (the original reported only
             the status of the LAST path, hiding earlier failures)
    """
    results = [is_dir_exist(path) for path in dirList]
    return bool(results) and all(results)


def check_contain_chinese(check_str):
    """Return True if `check_str` contains at least one CJK unified ideograph.

    :param check_str: str, or UTF-8 encoded bytes. The original called
                      .decode() unconditionally, which raises AttributeError
                      on Python 3 str — now both input types are accepted.
    :return: True if any character falls in U+4E00..U+9FFF, else False
    """
    if isinstance(check_str, bytes):
        check_str = check_str.decode('utf-8')
    return any('\u4e00' <= ch <= '\u9fff' for ch in check_str)


def get_curtime(format_str="%Y-%m-%d %H:%M:%S"):
    """Return the current local time formatted with `format_str`."""
    return time.strftime(format_str, time.localtime())

def get_TargetTime(otherDay=1, format='%Y-%m-%d'):
    """Return today's date shifted by `otherDay` days, formatted with `format`.

    :param otherDay: day offset (negative for the past)
    :param format: strftime pattern for the result
    """
    target = datetime.date.today() + datetime.timedelta(days=otherDay)
    return target.strftime(format)

def delete_dir(path):
    """Recursively delete directory `path`.

    :return: True on success; False on failure (the error is logged)
    """
    try:
        shutil.rmtree(path)
    except Exception as e:
        mylog.error(e)
        return False
    mylog.info(f'删除目录：{path}')
    return True

def delete_file(filepath):
    """Remove `filepath` if it exists.

    :return: True on success or when the file is already absent; False on error
    """
    try:
        if os.path.exists(filepath):
            os.remove(filepath)
    except Exception:
        return False
    return True

def read_csv(filename):
    """Read a CSV file and render its rows as comma-joined strings.

    The header row is joined plainly; in data rows every cell gets a trailing
    comma, with empty cells rendered as "''" and single-space cells as "' '".

    :param filename: CSV file path
    :return: list of rendered row strings, header first
    """
    rendered = []
    with open(filename, 'r') as fh:
        reader = csv.reader(fh)
        rendered.append(','.join(next(reader)))
        for row in reader:
            cells = []
            for cell in row:
                if cell == '':
                    cells.append("''")
                elif cell == ' ':
                    cells.append("' '")
                else:
                    cells.append(cell)
            rendered.append(''.join(c + ',' for c in cells))
    return rendered

def write_csv(file_name, rows, headers=None):
    '''
    Append dict rows to a CSV file.
    :param file_name: target file path
    :param rows: [{k:v, k1:v1}, {k:v}]
    :param headers: [k, k1]; a header line is written only when provided.
                    When None, field names are taken from the first row
                    (the original passed fieldnames=None to DictWriter,
                    which made writerows crash).
    :return:
    '''
    fieldnames = headers if headers is not None else (list(rows[0]) if rows else [])
    # newline='' stops the csv module from emitting blank lines on Windows
    with open(file_name, 'a', newline='') as f:
        f_csv = csv.DictWriter(f, fieldnames)
        if headers is not None:
            f_csv.writeheader()
        f_csv.writerows(rows)
        # note: no explicit f.close() — the with-block already closes the file

def list_to_dic(tar_list):
    """Build a dict from 'key,value[,...]' strings.

    Falsy entries are skipped; only the first two comma-separated fields of
    each entry are used (later fields are ignored).
    """
    mapping = {}
    for entry in tar_list:
        if not entry:
            continue
        fields = entry.split(',')
        mapping[fields[0]] = fields[1]
    return mapping


def rename_file(srcFile, new_name):
    """Rename `srcFile` to `new_name` inside its own directory, printing the outcome."""
    directory, _ = os.path.split(srcFile)
    target = os.path.join(directory, new_name)
    try:
        os.rename(srcFile, target)
    except Exception as e:
        print(e)
        print('rename file fail\r\n')
    else:
        print('rename file success\r\n')

def query_files_zip(dirpath, fileType=None):
    """Collect files one directory level below `dirpath` whose dot-less
    extension text is found in `fileType`, returning their full paths.

    :param dirpath: directory whose immediate sub-directories are scanned
    :param fileType: container/string of extension texts to accept.
        NOTE(review): the default None makes `ext in fileType` raise
        TypeError — callers appear to always pass a value; confirm.
        Also note `ext` carries no leading dot ('zip', not '.zip'), so
        fileType must use the same convention (or be a string containing it).
    :return: list of matching file paths
    """
    res_lst = os.listdir(dirpath)
    # mylog.info(f"res_lst: {res_lst}")
    zip_list = []
    for path in res_lst:
        path_sec = os.path.join(dirpath, path)
        if os.path.isdir(path_sec):
            sec_list = os.listdir(path_sec)
            # mylog.info(f"sec_list: {sec_list}")
            for file in sec_list:
                # a name with no dot yields the whole name as `ext`
                ext = file.split('.')[-1]
                if ext and ext in fileType:
                    # NOTE(review): splits `path` (the sub-directory name), not
                    # `file` — looks like a bug; the values are only logged.
                    # The log text '(unknown)' below also looks mangled —
                    # presumably it was meant to interpolate the file name.
                    filename, ext = os.path.splitext(path)
                    mylog.info(f"filename: (unknown), ext: {ext}")
                    zip_file = os.path.join(path_sec, file)
                    zip_list.append(zip_file)
    return zip_list


def query_target_files(dirPath, fileType=None, flag=True):
    '''
    Search a directory tree for files of the target types.
    :param dirPath: root directory; missing roots yield an empty list
    :param fileType: e.g. ['.zip', '.rar']; when falsy every file is collected
    :param flag: when True, descend recursively into sub-directories
    :return: list of matching file paths with '/' separators
    '''
    found = []

    def walk(current, recurse):
        entries = os.listdir(current)
        mylog.info(entries)
        for entry in entries:
            full = os.path.join(current, entry)
            if os.path.isdir(full) and recurse:
                walk(full, recurse)
                continue
            # with recurse=False a directory falls through and is treated
            # like a file, exactly as before
            _, ext = os.path.splitext(entry)
            if not fileType:
                found.append(full)
            elif ext in fileType:
                found.append(full)
            else:
                mylog.info(f'{entry} is not target!')

    if os.path.exists(dirPath):
        walk(dirPath, flag)
    if found:
        found = [unit.replace('\\', '/') for unit in found]
    mylog.info(found)
    return found

def query_target_dirs(dirPath, flag=True):
    '''
    Collect `dirPath` itself and (when `flag` is True) all of its
    sub-directories, recursively.
    :param dirPath: root directory; missing roots yield an empty list
    :param flag: when True, descend recursively into sub-directories
    :return: list of directory paths with '/' separators
    '''
    found = []

    def walk(current, recurse):
        entries = os.listdir(current)
        mylog.info(entries)
        for entry in entries:
            full = os.path.join(current, entry)
            if os.path.isdir(full) and recurse:
                found.append(full)
                walk(full, recurse)

    if os.path.exists(dirPath):
        found.append(dirPath)
        walk(dirPath, flag)
    if found:
        found = [unit.replace('\\', '/') for unit in found]
    mylog.info(f'query dirlist: {found}')
    return found

def queryDirsMutil(dirList):
    """Run query_target_dirs over every root in `dirList` and concatenate the results."""
    combined = []
    for root in dirList:
        combined.extend(query_target_dirs(root))
    return combined


def query_dirs(tar_path):
    """Return the full paths of the immediate sub-directories of `tar_path`.

    :param tar_path: directory to scan; a missing path yields an empty list
    :return: list of joined sub-directory paths
    """
    dir_list = []
    if os.path.exists(tar_path):
        for name in os.listdir(tar_path):
            full = os.path.join(tar_path, name)
            # test the joined path — the original called isdir on the bare
            # entry name, which only worked when the CWD was tar_path itself
            if os.path.isdir(full):
                dir_list.append(full)
    return dir_list

def run_command(cmd):
    """Run `cmd` in a shell and return its stdout as a list of bytes lines.

    :param cmd: shell command string (trusted input only — shell=True)
    :return: stdout split into lines, each keeping its trailing newline
    """
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() drains BOTH pipes and reaps the process; reading only
    # p.stdout (as the original did) can deadlock once stderr fills its
    # pipe buffer, and left a zombie process behind
    out, _ = p.communicate()
    return out.splitlines(keepends=True)

def rewritefile(delay=0):
    """Overwrite job/src/application/reload.py with a stub whose body embeds
    the current timestamp, so the module looks modified to a file watcher.

    :param delay: seconds to sleep before writing
    :return: True
    """
    time.sleep(delay)
    stamp = str(time.ctime())
    body = "\ndef reload():\n    print('%s')\n    return True\n    " % stamp
    target = 'job/src/application/reload.py'
    with open(target, 'w', encoding='utf-8') as fh:
        fh.write(body)
    return True

def run_bat_script(file_path, delay=0):
    """Launch a .bat file (Windows only — os.startfile) from its own directory.

    :param file_path: full path of the bat file
    :param delay: seconds to wait before launching
    :return: the current working directory after it has been restored
    """
    mylog.info(f'Run bat file: {file_path}')
    fp, fn = os.path.split(file_path)
    cur_path = os.getcwd()
    os.chdir(fp)
    try:
        time.sleep(delay)
        os.startfile(fn)
        mylog.info(f'{file_path} runs successfully!')
    finally:
        # always restore the CWD — the original left the process stranded
        # in `fp` whenever os.startfile raised
        os.chdir(cur_path)
    return os.getcwd()

def split_path(filepath):
    """Split a full file path into its components.

    :param filepath: full path to split
    :return: 4-tuple (directory, file name, base name without extension, extension)
    """
    directory, filename = os.path.split(filepath)
    base, ext = os.path.splitext(filename)
    return directory, filename, base, ext

def killProcess(port, pattern):
    """Kill the Windows process listening on `port`.

    :param port: port number to grep for in the socket table
    :param pattern: regex whose group 1 captures the PID in a netstat line
    :return: True
    """
    # the original ran "net -aon", which is not a valid command — the
    # listening-socket table comes from "netstat -aon"
    cmd1 = "netstat -aon | findstr %s" % port
    processes = run_command(cmd1)
    for unit in processes:
        line = unit.decode()
        # rstrip takes a SET of characters; the original passed '/r/n'
        # (slash, r, n) instead of the CR/LF pair
        res = re.search(pattern, line.rstrip('\r\n'))
        if res:
            pid = res[1]
            cmd2 = "taskkill /pid %s /f -t" % pid
            res2 = run_command(cmd2)
            mylog.info(res2)
    return True

def run_bat(bat_name):
    """Run a bat file from its own directory, redirecting its output to
    <name>_result.txt next to it.

    :param bat_name: full path of the bat file
    :return: path of the redirected result log
    """
    fpath, fname = os.path.split(bat_name)
    base, _ = os.path.splitext(fname)
    log_name = '%s_result.txt' % base
    run_log_path = os.path.join(fpath, log_name)
    prev_dir = os.getcwd()
    os.chdir(fpath)
    try:
        command = '%s >%s' % (fname, log_name)
        proc = subprocess.Popen(command, shell=True,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        proc.communicate(timeout=1200)
        mylog.info('log route:%s' % run_log_path)
        if proc.returncode == 0:
            mylog.info('%s update sucsess!' % fpath)
        else:
            mylog.info('%s update fail!' % fpath)
    except Exception as e:
        mylog.error(e)
    finally:
        os.chdir(prev_dir)
    return run_log_path

def run_bat2(bat_name):
    """Run a bat file from its own directory and print its stdout lines.

    A timestamped, empty log file is still created under ./logs for parity
    with the original, which opened that handle and never wrote to it.

    :param bat_name: full path of the bat file
    """
    cur_time = get_curtime('%Y%m%d%H%M%S')
    fpath, fname = os.path.split(bat_name)
    b_name, a_ex = os.path.splitext(fname)
    log_name2 = '%s_result_%s.txt' % (b_name, cur_time)
    run_log_path = os.path.join('./logs', log_name2)
    # the original leaked this handle (opened, never closed, never used);
    # keep creating the file but release the handle immediately
    with open(run_log_path, 'w', encoding='utf-8'):
        pass
    cur_dir = os.getcwd()
    os.chdir(fpath)
    try:
        p = subprocess.Popen(fname, shell=True, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # read stdout exactly once — the original called readlines() twice,
        # so its second loop always saw an exhausted stream
        out_lines = p.stdout.readlines()
        print(out_lines)
        for line in out_lines:
            print(line.decode())
    finally:
        # restore the CWD even when Popen/decoding fails
        os.chdir(cur_dir)



def rewrite_bat(filepath, tar_dict):
    """Produce a non-interactive copy of a GBK-encoded bat installer.

    'set /p NAME=' prompt lines become 'set NAME=<tar_dict[NAME]>' and
    'pause ' lines become 'echo contine ', so the script can run unattended.

    :param filepath: source bat file (GBK encoded)
    :param tar_dict: values for the prompted variables; must contain 'tnsname'
    :return: path of the generated install_<tnsname>.bat
    """
    directory, _ = os.path.split(filepath)
    out_name = 'install_%s.bat' % tar_dict['tnsname']
    out_path = os.path.join(directory, out_name)
    with open(filepath, 'r', encoding='gbk') as src:
        original = src.readlines()
    rewritten = []
    for line in original:
        prompt = re.match('set /p (.*)=', line)
        pause = re.match('pause ', line)
        if prompt is not None and pause is None:
            rewritten.append('set %s=%s\n' % (prompt[1], tar_dict[prompt[1]]))
        elif pause is not None and prompt is None:
            rewritten.append('echo contine \n')
        else:
            rewritten.append(line)
    with open(out_path, 'w', encoding='gbk') as dst:
        dst.writelines(rewritten)
    return out_path

def judge_type(filepath):
    """Classify an upgrade-package path by the package name it contains.

    :param filepath: path/name to classify
    :return: 'bat_frem', 'bat_nc' or 'bat_fsk'; None when nothing matches
    """
    # checked in the original's order — FSK-NC must be tried before FSK
    patterns = (
        ('FERM20-平台基础升级包', 'bat_frem'),
        ('FSK-NC-数据库升级包', 'bat_nc'),
        ('FSK-数据库升级包', 'bat_fsk'),
    )
    for marker, f_type in patterns:
        if re.search(marker, filepath) is not None:
            return f_type
    return None

def sort_db_lst(file_lst):
    """Order upgrade packages: FERM20 platform first, then FSK, then FSK-NC.

    :param file_lst: candidate file paths
    :return: (reordered paths, matched package-name marker for each path)
    """
    markers = ('FERM20-平台基础升级包', 'FSK-数据库升级包', 'FSK-NC-数据库升级包')
    new_file_lst = []
    target_file = []
    for marker in markers:
        for filepath in file_lst:
            if re.search(marker, filepath) is not None:
                new_file_lst.append(filepath)
                target_file.append(marker)
    return new_file_lst, target_file

def select_db(db_list, num=1):
    '''
    Split a package list into FSK, FSK-NC and workflow packages.
    :param db_list: raw list of package paths
    :param num: keep the newest `num` of each category in the combined list;
                a category with fewer than `num` entries is kept whole
    :return: (fnc, fsk, flow, combined-latest list in fsk/fnc/flow order)
    '''
    fnc = []
    fsk = []
    flow = []
    mylog.info('远程获取的FSK,FSK-NC,工作流如下：')
    for file in db_list:
        mylog.info(str(file))
        r1 = re.search('FSK-数据库升级包', file)
        r2 = re.search('FSK-NC-数据库升级包', file)
        if r1 is not None and r2 is None:
            fsk.append(file)
        elif r1 is None and r2 is not None:
            fnc.append(file)
        else:
            flow.append(file)

    def _latest(items):
        # keep the last `num` entries; the original repeated this slice-copy
        # block verbatim for all three categories
        return items[-num:] if len(items) >= num else items

    new_list = _latest(fsk) + _latest(fnc) + _latest(flow)
    return fnc, fsk, flow, new_list

def read_yaml(yaml_path):
    """Load and return the contents of a UTF-8 YAML file (safe loader)."""
    with open(yaml_path, 'r', encoding='utf-8') as fh:
        return yaml.safe_load(fh.read())

def write_yaml(yaml_path, content):
    """Dump `content` to a UTF-8 YAML file, keeping unicode readable and
    using block (non-flow) style. Always returns True."""
    with open(yaml_path, 'w', encoding="utf-8") as fh:
        yaml.dump(content, fh, allow_unicode=True, default_flow_style=False, width=1000)
    return True

def read_log():
    """Read today's RunInfo log (GBK encoded) from ./logs.

    :return: (list of lines, number of lines)
    """
    log_path3 = 'logs/RunInfo_%s.log' % time.strftime("%Y%m%d", time.localtime())
    with open(log_path3, 'r', encoding='gbk') as fh:
        lines = fh.readlines()
    return lines, len(lines)

def show_logs():
    """Print every INFO-level line from today's run log."""
    lines, _ = read_log()
    for entry in lines:
        if re.search(' INFO] ', entry):
            print(entry)

def judge_file_newest(filename):
    """Check whether `filename` contains today's date stamp (YYYYMMDD).

    :param filename: file name or path to inspect
    :return: True when today's date appears in the name, else False
    """
    # the original logged a constant placeholder string with no interpolation;
    # log the actual name being judged
    mylog.info(f'Judge filename: {filename}')
    time_str = get_curtime('%Y%m%d')
    result = re.search(time_str, filename)
    if result is None:
        mylog.info("查询为空！")
        return False
    else:
        mylog.info(result[0])
        return True

def check_fail_info(fileanme, fail_flags=None, encode='utf-8'):
    '''
    Scan a run log for failure markers and collect the surrounding context.
    :param fileanme: run-log file name
    :param fail_flags: iterable of regex patterns that mark a failure line;
                       None disables scanning and returns an empty list
    :param encode: file encoding
    :return: list of matched lines, each with up to five lines of leading context
    '''
    fail_lines = []
    with open(fileanme, 'r', encoding=encode) as f:
        cons = f.readlines()
    mylog.info(f'以下是脚本运行失败的信息：')
    if fail_flags is None:
        return fail_lines
    for i, line in enumerate(cons):
        if line == '':
            continue
        for flag in fail_flags:
            if re.search(flag, line) is not None:
                mylog.error('************************失败信息如下************************')
                # clamp the slice start: the original used i-5 directly, so
                # matches in the first five lines produced a NEGATIVE start
                # and pulled context from the END of the log instead
                fail_lines += cons[max(0, i - 5): i + 1]
                mylog.info('\n'.join(fail_lines))
    return fail_lines


def parse_log(fileanme, success_flag='升级成功', fail_flags=None):
    """Archive a GBK run log as UTF-8 under ./logs and report success/failure.

    :param fileanme: path of the GBK-encoded run log
    :param success_flag: pattern whose presence marks a successful upgrade
    :param fail_flags: failure markers forwarded to check_fail_info on failure
    :return: True on success, False otherwise
    """
    fp, fn = os.path.split(fileanme)
    name, ext = os.path.splitext(fn)
    new_name = '%s_%s%s' % (name, get_curtime("%Y%m%d%H%M%S"), ext)
    with open(fileanme, 'r', encoding='gbk') as src:
        con = src.read()
    bak_name = os.path.join('./logs', new_name)
    with open(bak_name, 'w', encoding='utf-8') as dst:
        dst.write(con)
    if re.search(success_flag, con) is None:
        mylog.warn(f'{fp} 升级失败！')
        mylog.info(f'详细失败信息请查看脚本运行日志：{bak_name}。')
        mylog.info('若是因为编译原因，请人工到数据库进行无效编译。其他问题可先解决再重新升级！')
        check_fail_info(bak_name, fail_flags)
        return False
    mylog.info(f'{fp} 升级成功！')
    mylog.info(f'脚本运行日志：{bak_name}。')
    return True

# linux 系统试用
def get_pid(name):
    """Return an iterator of PIDs for process `name` (Linux only, via pidof)."""
    output = check_output(["pidof", name])
    return map(int, output.split())

def get_process_id(name):
    """Return PIDs whose full command line matches `name` (Linux, via pgrep -f)."""
    child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False)
    stdout_data = child.communicate()[0]
    return [int(pid) for pid in stdout_data.split()]

def kill(pid_lst):
    """Send SIGKILL to every PID in `pid_lst` (a bare int is accepted too).

    A missing process stops the loop and is logged as a warning, matching
    the original behavior. Always returns True.
    """
    targets = pid_lst if isinstance(pid_lst, list) else [pid_lst]
    try:
        for pid in targets:
            result = os.kill(pid, signal.SIGKILL)
            mylog.info('已杀死pid为%s的进程,　返回值是:%s' % (pid, result))
    except OSError:
        mylog.warn('没有如此进程!!!')
    return True


if __name__ == '__main__':
    # Manual smoke test: regenerate the reload stub when run directly.
    # res = get_TargetTime(-3, "%Y-%m-%d")
    # print('res:', res)
    rewritefile()