#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
@File: ReadLoaclConfig.py
@Author: lijk34925 
@Date: 2024/9/27 13:38
@desc: Helpers for DingTalk notifications, JFrog zip download/comparison, and batch SQL import.
"""
import re

from AmlScripts.amlConfig import amlConfig
from BladeFerm.databases import DataBaseApp
from BladeFerm.databases.SqlDealApp import read_sql_file
from BladeFerm.utils.FileOerate import getFilesByPath
from BladeFerm.utils.OperationExcel import ExcelDeal
from BladeFerm.utils.basic_opration import run_command, is_dir_exist, delete_file
from BladeFerm.utils.dataoperation import YamlOperation
from BladeFerm.utils.jsonOperation import JsonOperation
from DataTest.restartServer.restartServer import logger
from DataTest.util.compareZip import compare_zip_files
from BladeFerm.utils.http_opration import DingRebot


def tip_ding(message, configPath='./config/金融反洗钱项目.yaml'):
    """Send a markdown DingTalk notification that @-mentions everyone.

    :param message: markdown body of the notification
    :param configPath: YAML config file whose ``env.dingApiKey`` holds the robot key
    :return: always True
    """
    env_settings = YamlOperation(configPath).readYaml().get('env')
    robot = DingRebot(env_settings.get('dingApiKey'))
    robot.post(message, [], isAtAll=True, msgType='markdown')
    return True


def get_excel_content(excel_path, json_path):
    """Attach a display ``title`` to each company record by joining Excel metadata.

    Reads company rows from the Excel sheet and the company list from the JSON
    file, matches them by company name (case-insensitive regex search of the
    Excel "客户" cell against the JSON "name" field), and writes a ``title``
    of the form ``short_name(customer_no)-行业-数据库类型及版本`` (falling back to
    just ``short_name(customer_no)`` when no Excel row matches).

    :param excel_path: path of the Excel file holding per-customer metadata
    :param json_path: path of the JSON file with the company list under "data"
    :return: the list of company dicts, each with a "title" key added
    """
    excel_content = ExcelDeal(excel_path).readExcelDatas()
    company_list = JsonOperation().read_json(json_path, "utf-8")
    # Company records live under the "data" key.
    company_no_list_result = company_list.get("data")
    for item in company_no_list_result:
        company_name = item.get("short_name")
        company_no = item.get("customer_no")
        full_name = item.get("name")
        current = {}
        for info in excel_content:
            cpy_name = info.get("客户")
            # Guard: an empty Excel cell or missing "name" would make
            # re.search raise TypeError; skip such rows instead of crashing.
            # NOTE(review): the Excel cell is used as a regex pattern on
            # purpose (partial/flexible matching) — metacharacters in it will
            # be interpreted, not matched literally.
            if cpy_name and full_name and re.search(cpy_name, full_name, re.I):
                current.update(info)
                break
        if current:
            hang_ye = current.get("行业", "Null")
            db_type = current.get("数据库类型及版本", "Null")
            item["title"] = f"{company_name}({company_no})-{hang_ye}-{db_type}"
        else:
            item["title"] = f"{company_name}({company_no})"
    logger.info(f"公司号列表: {company_no_list_result}")
    return company_no_list_result


def download_zip_from_jfrog(root_path, file_name):
    """Download one artifact from JFrog Artifactory into *root_path*.

    Strips everything up to and including "artifactory/" from *file_name* to
    get the repo-relative path, downloads it flat via the bundled jfrog CLI,
    and returns the local file path.

    :param root_path: local directory (with trailing separator) to save into
    :param file_name: full artifact URL or repo path containing "artifactory/"
    :return: local path of the downloaded file
    """
    fileList = file_name.split("artifactory/")
    file_name = fileList[-1]
    # Fix: the original literal used "\j" (an invalid escape sequence —
    # SyntaxWarning since Python 3.12); "\\j" produces the identical command.
    cmd_download = f".\\tools\\jfrog.exe rt dl {file_name} {root_path} --flat=true"
    logger.info("下载文件命令: %s" % cmd_download)
    res = run_command(cmd_download)
    logger.info("下载文件结果: %s" % res)
    # --flat=true drops the repo directory structure, so the local file is
    # root_path + basename.
    files = file_name.split("/")
    file_path = f"{root_path}{files[-1]}"
    logger.info(f"{file_name} 下载文件成功，本地存储目录为：{file_path}")
    return file_path


def download_and_compare_zip(name, raw, target):
    """Download two zip artifacts, diff them, notify DingTalk, and clean up.

    :param name: person to @-mention in the DingTalk notification
    :param raw: artifact path/URL of the baseline zip
    :param target: artifact path/URL of the zip to compare against the baseline
    :return: the diff result from compare_zip_files (truthy => differences found)
    """
    zip_path = './static/zip/'
    is_dir_exist(zip_path)
    # Download both archives to the local staging directory.
    raw_file_path = download_zip_from_jfrog(zip_path, raw)
    tar_file_path = download_zip_from_jfrog(zip_path, target)
    try:
        # Compare the two zip archives.
        diff_result = compare_zip_files(raw_file_path, tar_file_path)
        fileA = raw.split("/")[-1]
        fileB = target.split("/")[-1]
        logger.info(f"比对结果为：{diff_result}")
        # Send the DingTalk notification.
        res_title = '不通过' if diff_result else '通过'
        res_content = '  \n'.join(diff_result)
        message = f"### 通知：\n"
        message += f"{fileA} &\n"
        message += f"{fileB} \n"
        message += f"比对结果：{res_title} , @{name} \n"
        if diff_result:
            message += f"{res_content}"
        tip_ding(message)
    finally:
        # Fix: delete the downloaded archives even if comparison or the
        # notification raises, so temp files never accumulate.
        delete_file(raw_file_path)
        delete_file(tar_file_path)
    return diff_result


#  Batch import of SQL scripts: unzip helper + import driver
def unzip_file(filepath, target_path):
    """Quietly extract *filepath* into *target_path* using the bundled unzip.exe.

    :param filepath: path of the zip archive to extract
    :param target_path: directory to extract into
    :return: the output of run_command for the unzip invocation
    """
    command = f".\\tools\\unzip.exe -q {filepath} -d {target_path}"
    logger.info("解压命令: %s" % command)
    outcome = run_command(command)
    logger.info("解压文件结果: %s" % outcome)
    return outcome


def import_sql_files(env_name, tar_path, user, encoding='utf-8'):
    """Batch-import SQL scripts: read every SQL file under a directory and run it.

    For each file found under *tar_path*: parse its SQL statements, execute
    them against the database configured for *env_name*, log per-file
    success/failure counts, and send a DingTalk notification on failure and
    on overall completion.

    :param env_name: environment name (key into amlConfig)
    :param tar_path: directory holding the extracted SQL files
    :param user: database credentials in "username/password" form
    :param encoding: text encoding used to read the SQL files
    :return: list of per-file summary dicts (file name, row counts)
    """
    def __count_result(response: dict):
        # Tally execution results: an empty value means the statement
        # succeeded; a non-empty value is its error detail.
        success_num, fail_num = 0, 0
        fail_rows = []
        for key, value in response.items():
            if not value:
                success_num += 1
            else:
                fail_num += 1
                # Keys are 0-based statement indexes; report 1-based line
                # numbers for humans. TODO(review): confirm keys are ints.
                fail_rows.append(key + 1)
        return success_num, fail_num, fail_rows

    # Split "username/password" credentials.
    usr, pwd = user.split("/")
    # Build the DB connection for this environment with the given credentials.
    DB_APP = DataBaseApp()
    system_odbc_config = amlConfig.get(env_name).get('databases')
    system_odbc_config.update({'env': env_name, 'user': usr, 'password': pwd})
    logger.info(f'Init db link! system_odbc_config: {system_odbc_config}')
    DB_APP.make_db(**system_odbc_config)
    # Collect every SQL file under the target directory.
    files = getFilesByPath(tar_path)
    result = []
    for file in files:
        try:
            logger.info(f"开始导入SQL文件： {file}")
            # 1. Parse the file into individual SQL statements.
            sql_lines = read_sql_file(file, encoding=encoding)
            logger.info(f"文件名： {file} 导入sql行数： {len(sql_lines)}")
            # 2. Execute the statements.
            sql_response = DB_APP.get_run_sql_result(sql_lines)
            # 3. Summarize per-statement results.
            sus_num, failed_num, failed_list = __count_result(sql_response)
            logger.info(f"文件名： {file} 导入sql成功： {sus_num} 行； 失败 {failed_num} 行")
            if failed_list:
                logger.error(f"文件名： {file} 导入sql失败行数： {failed_list}")
            result.append({'文件名': file, 'SQL行数': len(sql_response), '成功行数': sus_num, '失败行数': failed_num})
        except Exception as e:
            # logger.exception keeps the traceback, which logger.error dropped.
            logger.exception(f"文件名： {file} 导入sql失败，失败原因： {e}")
            # Notify DingTalk about the per-file failure.
            message = f"### 通知： {env_name} 环境导入文件 {file} 失败！\n 具体结果到平台查看日志！连接：http://10.20.181.131:8082/#/aml/change"
            tip_ding(message)
    # Notify DingTalk that the batch import is done.
    message = f"### 通知： {env_name} 环境批量导入数据完成！ \n 具体结果到平台查看日志！连接：http://10.20.181.131:8082/#/aml/change"
    tip_ding(message)
    return result


