# -*- coding: utf-8 -*-
# @Time    : 2023.10.24 22:20
# @Author  : _oah
# @File    : get_debug_metrics.bak
# @Description : TODO

# 程序所用扩展包
import pandas as pd
import numpy as np
import requests
import yaml
import json
import os
import re
import difflib
# ==================================================================

# ==================================================================
# import ......
import logging
import flask_restful
# 自定义模块
from errors import my_abort, generate_response, ResponseCode

# Custom error handling: replace flask_restful's abort with the project's
# handler so all aborts produce the project's unified response format.
flask_restful.abort = my_abort
root_logger = logging.getLogger('root')  # application-wide root logger
logger = logging.getLogger('main')  # logger used throughout this module


# ==================================================================


# ==================================================================
def function(Authorization, id):
    """Entry point: run the recommendation pipeline and wrap it in a response.

    Delegates to function_qs; forwards any non-zero (error) response
    unchanged, re-wraps the payload on success, and converts unexpected
    exceptions into an ERROR response carrying the exception text.
    """
    datas = [[]]
    # pklPath used to come from current_app.config['PKL_PATH']; prefer
    # passing it in as a parameter if it is reinstated.

    # Coarse-grained error control; TODO: have the predict_* helpers raise
    # custom exceptions instead of relying on this catch-all.
    try:
        result = function_qs(Authorization, id)
        if result['resp_code'] != 0:
            # Propagate the error response built by function_qs as-is.
            return result
        datas = result['datas']
        return generate_response(datas)
    except Exception as exc:
        return generate_response(datas=datas, code=ResponseCode.ERROR, appendmessage='{0}'.format(str(exc)))


def function_qs(Authorization, id):  # fileId2,
    """Run the commissioning-process recommendation for one uploaded file.

    Returns a success response wrapping the recommendation result, or a
    WRONG_PARAM response carrying the exception text when processing fails
    (e.g. the file name is wrong).
    """
    logger.info('------------------')
    logger.info('调测工艺流程推荐开始')
    logger.info('------------------')
    try:
        # model = joblib.load(path_model)
        prediction = dataProcess(Authorization, id)  # fileId2,
        logger.info('调测工艺流程推荐完成')
        return generate_response(prediction)
    except Exception as exc:
        logger.error('调测工艺流程推荐失败，请检查文件名是否正确')
        return generate_response(code=ResponseCode.WRONG_PARAM, appendmessage='{0}'.format(str(exc)))


# def dataProcess(datapath1, datapath2, savepath):
def dataProcess(Authorization, id):  # fileId2,
    # 读取配置文件
    uploadurl, bucket, objectName, dataPath, downloadurl, savePath = readConfig()
    # 文件获取
    CSVFileName = id
    # ConstCSVFileName = download(Authorization, fileId2, downloadurl, savePath)
    # XMLFileName = download(Authorization, fileId2, downloadurl, savePath)

    # RE = dataPath + fileName + ".py"  # '.\\Data\\output\\commissioning_process.py'
    # dt = pd.read_csv(savePath + CSVFileName)  # '.\\Data\\input\\test_paramater.csv'

    # 获取所有.py文件名（不带后缀）
    py_files = [f[:-3] for f in os.listdir('./Data/input0') if f.endswith('.py')]

    # 创建包含序号和文件名的DataFrame
    list_df = pd.DataFrame({
        '序号': range(1, len(py_files) + 1),
        '调试项目名称': py_files
    })
    # 保存为list.csv
    list_df.to_csv('./Data/input0/list.csv', index=False, encoding='utf-8-sig')

    df = pd.read_csv('./Data/input0/list.csv')
    # a = savePath + XMLFileName  # xml
    #调试文件中所有项目名称列表
    unique_list = extract_unique_debug_items(savePath + CSVFileName)
    parameter_const = calculate_similarity(df, unique_list)
    #存下没加py的项目名
    fnc = parameter_const['调试项目名称'].tolist()
    parameter_const['调试项目名称'] = parameter_const['调试项目名称'].apply(lambda x: x + ".py")
    file_names = parameter_const['调试项目名称'].tolist()
    results = parse_files(file_names)

    # 剔除掉匹配到的项目名称

    result_out = [item for item in unique_list if item not in fnc]

    # 给剩下的没匹配到的加（空）
    result_out = [i + '(空)' for i in result_out]
    # 转字典
    result_out = dict.fromkeys(result_out)

    # 合并字典
    results = dict(results, **result_out)

    return results


# def download(Authorization, fileId, url, savePath):
#     """
#     将文件从minio下载下来
#     :param Authorization:
#     :param fileId:
#     :return:
#     """
#     headers = {
#         'Connection': 'keep-alive',
#         'Accept': 'application/json,tex/plain,*/*',
#         'X-Requested-with': 'XMLHttpRequest',
#         'Authorization': Authorization
#     }
#
#     Params = {
#         'id': fileId
#     }
#
#     datas = requests.get(url, headers=headers, params=Params)
#
#     savePath = savePath + fileId + ".csv"
#
#     with open(savePath, "wb") as code:
#         code.write(datas.content)
#
#     return fileId + ".csv"


def upload(Authorization, fileName, url, bucket, objectName, dataPath):
    """Upload a generated .py process file to the file service.

    :param Authorization: auth token forwarded in the request headers
    :param fileName: file name without the .py extension
    :param url: upload endpoint URL
    :param bucket: target storage bucket
    :param objectName: target object name/prefix
    :param dataPath: directory containing the file
    :return: the fileId assigned by the file service
    :raises KeyError: if the service response lacks data.fileId
    """
    dataPath = dataPath + fileName + ".py"
    logger.debug('upload datapath: %s', dataPath)

    headers = {
        'Connection': 'keep-alive',
        # Fixed typo: was 'tex/plain'.
        'Accept': 'application/json,text/plain,*/*',
        'X-Requested-with': 'XMLHttpRequest',
        'Authorization': Authorization
    }

    data = {
        'bucket': bucket,
        'objectName': objectName
    }
    logger.debug('upload form data: %s', data)

    # 'with' guarantees the file handle is closed even if the request
    # raises (the original leaked the handle opened inline in `files`).
    with open(dataPath, 'rb') as fh:
        files = {'uploadFile': (fileName, fh, 'application/json')}
        res = requests.post(url, headers=headers, files=files, data=data).text

    logger.debug('upload response: %s', res)
    res = json.loads(res)
    fileId = res["data"]["fileId"]
    return fileId


def readConfig():
    """Read upload/download settings from ./conf/application.yml.

    :return: (uploadurl, bucket, objectName, dataPath, downloadurl, savePath)
    :raises FileNotFoundError: if the config file is missing
    :raises KeyError: if a required key is absent from the YAML
    """
    with open('./conf/application.yml', 'r', encoding='utf-8') as f:
        # safe_load never constructs arbitrary Python objects from the YAML,
        # which is the recommended loader for plain configuration files.
        content = yaml.safe_load(f)

    upload_cfg = content["fileUploadDownload"]["upload"]
    download_cfg = content["fileUploadDownload"]["download"]
    return (
        upload_cfg["url"],
        upload_cfg["bucket"],
        upload_cfg["objectName"],
        upload_cfg["dataPath"],
        download_cfg["url"],
        download_cfg["savePath"],
    )


def calculate_similarity(dataframe, target_strings):
    """Keep only rows of *dataframe* whose '调试项目名称' exactly matches
    (similarity ratio 1.0, ignoring junk characters) some target string.

    Side effect: adds a '字符相似度' column (the best ratio per row) to the
    caller's dataframe before filtering.

    :param dataframe: DataFrame with a '调试项目名称' column
    :param target_strings: iterable of candidate names from the input CSV
        (may be empty; every row then scores 0.0 and nothing matches)
    :return: the rows whose best similarity is exactly 1.0
    """
    junk_strings = ['', ' ', '：', '。', '测试']

    def is_junk(ch):
        # Characters treated as junk by the matcher (single chars only;
        # the multi-char '测试' entry can never equal one character).
        return ch in junk_strings

    best_scores = []
    for name in dataframe['调试项目名称']:
        # Fix seq2 (the row name) once so SequenceMatcher reuses its cached
        # b-side statistics across all targets; only seq1 changes per target.
        matcher = difflib.SequenceMatcher(is_junk, '', name)
        scores = []
        for target in target_strings:
            matcher.set_seq1(target)
            scores.append(matcher.ratio())
        # default=0.0 keeps an empty target list from raising ValueError.
        best_scores.append(max(scores, default=0.0))

    dataframe['字符相似度'] = best_scores
    return dataframe[dataframe['字符相似度'] == 1.0]  # exact matches only


def parse_files(file_names):
    """Parse each matched process script into a numbered step dictionary.

    For every existing file under ./Data/input0/, lines that fall inside a
    ``def`` whose name contains '初始化' (initialisation) and that contain
    '仪器.' (an instrument call) are split into
    (instrument name, operation, arguments) and numbered '第N步' (step N).

    :param file_names: list of .py file names (with extension)
    :return: dict mapping project name (extension stripped) ->
        {'第N步': {'仪器名称': ..., '操作': ..., '参数': ...}}
    """
    results = {}  # parsed output, keyed by project name

    for recommendedResult in file_names:
        file_path = os.path.join('./Data/input0/', recommendedResult)
        if not os.path.exists(file_path):
            print(recommendedResult + '文件不存在')
        else:
            with open(file_path, 'r', encoding='utf-8') as file:
                output = []
                flag = 0  # 1 while inside an '初始化' function body
                for line in file:
                    # Strip all spaces so 'def 初始化' / '仪器 .' variants match.
                    line = line.replace(' ', '')
                    if '初始化' in line and line.startswith('def'):
                        flag = 1
                    elif line.startswith('def'):
                        flag = 0
                    if line == '\n':
                        continue
                    # NOTE(review): this only matches a line that is exactly
                    # '#' with no trailing newline (a bare '#' as the file's
                    # last line) — presumably meant to skip comment lines.
                    if line == '#':
                        continue
                    if flag == 0:
                        continue
                    if '仪器.' in line:
                        line = line.replace('\n', '')
                        # Text inside the first (...) pair: the call arguments.
                        # NOTE(review): raises IndexError if a '仪器.' line has
                        # no parentheses — TODO confirm inputs always have them.
                        result_l = re.findall(r"[(](.*?)[)]", line)
                        # Strip (...) / {...} / [...] groups before splitting on
                        # '.' so arguments don't pollute the dotted parts.
                        line = re.sub(u"\\(.*?\\)|\\{.*?}|\\[.*?]", "", line)
                        res = line.split('.')
                        res.append(result_l[0])
                        output.append(res)
                if len(output) == 0:
                    continue
                else:
                    output = pd.DataFrame(output)
                    # Keep the last three columns: instrument, operation, args.
                    output = output.iloc[:, -3:]
                    output.columns = ['仪器名称', '操作', '参数']
                    ans = {}
                    for i in range(len(output)):
                        tmp = {'仪器名称': output['仪器名称'][i], '操作': output['操作'][i], '参数': output['参数'][i]}
                        j = i + 1
                        stri = '第' + str(j) + '步'
                        ans[stri] = tmp
                    recommendedResult = recommendedResult.split('.')[0]
                    results[recommendedResult] = ans

    return results


def extract_unique_debug_items(csv_file_path):
    """Return the unique debug-item names ('调试项目名称') from the CSV.

    The order is deterministic: first occurrence in the file wins. The
    previous implementation grouped rows by '调试工步名称' and then pushed
    everything through list(set(...)), which both discarded the grouping
    work and produced a different order on every run (hash randomisation).

    :param csv_file_path: path to a CSV containing a '调试项目名称' column
    :return: list of unique item names in first-occurrence order
    """
    data = pd.read_csv(csv_file_path)
    # dict.fromkeys de-duplicates while preserving encounter order.
    return list(dict.fromkeys(data['调试项目名称']))
