import json
import os
from pathlib import Path
import pandas as pd
from numpy import mean
from src.utils.loggers import db_logger


from src.database.db_engine import engine_coordinate as engine


# Project source root (two levels above this module); used to resolve
# paths to the static/ assets below.
src_Dir = os.path.dirname(os.path.dirname(__file__))


def get_file_path(file_dir=None):
    """Locate the first Excel file under the static data directory.

    :param file_dir: directory path relative to ``src_Dir``; defaults to
        ``static/data_temp/``.
    :return: resolved absolute :class:`pathlib.Path` of the first
        ``.xlsx``/``.xlsm`` file found, or ``None`` when the directory
        is missing or contains no Excel file.
    """
    rel_file = 'static/data_temp/' if file_dir is None else file_dir
    full_file_dir = Path(src_Dir) / rel_file

    if not full_file_dir.exists():
        db_logger.error(f"目录不存在: {full_file_dir}")
        # Bug fix: the old code fell through to iterdir() on a missing
        # directory and raised FileNotFoundError; bail out instead.
        return None

    excel_files = [f for f in full_file_dir.iterdir()
                   if f.is_file() and f.suffix.lower() in ['.xlsx', '.xlsm']]

    if not excel_files:
        db_logger.error("未找到 Excel 文件")
        return None

    # Return the full, symlink-resolved path of the first match.
    abs_path = excel_files[0].resolve()
    db_logger.info(f"找到 Excel 文件: {abs_path}")

    return abs_path


def read_excel_data(
        chexing=None,
        file_path=None,
        header=5,
        usecols=None):
    """Read the measurement Excel sheet and build a deviation DataFrame.

    The result has columns [特征点号, 方向, 名义值, 测点功能, last_1..last_100],
    one row per point/axis, with cell values being the deviation
    (measured - nominal) rounded to 2 decimals.

    :param chexing: vehicle model code — currently unused here; kept for
        interface compatibility with callers.
    :param file_path: Excel file to read; auto-discovered via
        get_file_path() when None.
    :param header: header row index passed to ``pd.read_excel``.
    :param usecols: column selection; defaults to the 3 meta columns plus
        the 100 most recent measurement columns (104..203).
    :return: ``(df, columnVinDict)`` — the DataFrame and a dict mapping
        'last_<n>' column names to actual VINs, padded with 'null<n>'
        placeholders when fewer than 100 vehicles exist.
    """
    if file_path is None:
        file_path = get_file_path()

    if usecols is None:
        usecols = list(range(0, 3)) + list(range(104, 204))

    db_logger.info(f'file_path为: {file_path}')

    df_0 = pd.read_excel(file_path, sheet_name='Data', header=header,
                         index_col=None, usecols=usecols)
    db_logger.info(f'df_0 shape为: {df_0.shape}')

    # Drop the 4 extra non-data rows that follow the header.
    df_1 = df_0.drop(labels=[0, 1, 2, 3])

    # Give the first three columns stable names.
    df_2 = df_1.rename(columns={df_1.columns[0]: '特征点号',
                                df_1.columns[1]: '方向',
                                df_1.columns[2]: '名义值'})

    # Keep only rows that carry a measurement axis.  .copy() fixes the
    # SettingWithCopyWarning the old code triggered by assigning into a
    # slice of df_2 below.
    df_3 = df_2[df_2['方向'].isin(['X', 'Y', 'Z'])].copy()

    # A point number spans up to 3 rows (X/Y/Z); forward-fill it.
    df_3['特征点号'] = df_3['特征点号'].ffill(limit=2)

    db_logger.info(f'df_3 shape为: {df_3.shape}')

    # Convert raw measurements to deviations from nominal, rounded to 2
    # decimals to keep the frontend payload small.  Vectorized instead of
    # the old per-column loop.
    df_4 = df_3.copy()
    meta_cols = ['特征点号', '方向', '名义值']
    value_cols = [c for c in df_4.columns if c not in meta_cols]
    df_4[value_cols] = df_4[value_cols].sub(df_4['名义值'], axis=0).round(2)

    db_logger.info(f'df_4 shape为: {df_4.shape}')

    # Add a human-readable description keyed by the first 6 chars of the
    # point number (assumes 特征点号 is fully filled by the ffill above).
    point_name_dict = read_point_name()
    df_4.insert(2, '测点功能', df_4['特征点号'])
    df_4['测点功能'] = [point_name_dict.get(x[:6], '未查到')
                    for x in list(df_4['特征点号'])]

    # Normalise dtypes (nullable extension types).
    df5 = df_4.convert_dtypes()
    db_logger.info(f'df5 shape为: {df5.shape}')

    # Strip pandas' duplicate-column ".1" suffixes: a VIN is 17 chars.
    df6 = df5.rename(columns=lambda x: x[:17] if x.startswith('LNB') else x)

    # Uniform column names so ORM models can be generated downstream.
    lastOrder = [f'last_{order}' for order in range(1, 101)]

    # Actual VIN list may hold fewer than 100 vehicles.
    vinList = list(df6.columns[4:104])
    db_logger.info(f'vinList为: {vinList}')

    # Pad to exactly 100 entries (replaces the old bare-except indexing
    # loop; yields the same null<k+1>..null<100> names).
    while len(vinList) < 100:
        vinList.append(f'null{len(vinList) + 1}')

    columnVinDict = dict(zip(lastOrder, vinList))
    db_logger.info(f'columnVinDict为: {columnVinDict}')

    # Zero-filled placeholder columns for the missing vehicles.
    for col in vinList:
        if col.startswith('null'):
            df6[col] = 0

    # Rename measurement columns to the uniform last_<n> scheme.
    df6.columns = list(df6.columns[0:4]) + lastOrder

    db_logger.info(f'df6 shape为: {df6.shape}')

    # Ship the data together with the most-recent-100 VIN mapping.
    return df6, columnVinDict


def saveColumnVinDict(chexing, columnVinDict):
    """Persist the last_N → VIN mapping as JSON under static/.

    :param chexing: vehicle model code (e.g. ``'F06'``), used in the filename.
    :param columnVinDict: mapping of ``last_<n>`` column names to VINs.
    """
    columnVinDictJsonFile = f'columnVinDict_{chexing}.json'
    # Consistency fix: log via db_logger like readColumnVinDict (was print()).
    db_logger.info(f'columnVinDictJsonFile为: {columnVinDictJsonFile}')
    with open(os.path.join(src_Dir, "static/", columnVinDictJsonFile), 'w', encoding='utf-8') as f:
        json.dump(columnVinDict, f, ensure_ascii=False)


def readColumnVinDict(chexing='F06'):
    """Load the last_N → VIN mapping previously saved for *chexing*.

    :param chexing: vehicle model code used in the JSON filename.
    :return: dict mapping ``last_<n>`` column names to VINs.
    """
    columnVinDictJsonFile = f'columnVinDict_{chexing}.json'
    db_logger.info(f'columnVinDictJsonFile为: {columnVinDictJsonFile}')
    json_path = os.path.join(src_Dir, "static/", columnVinDictJsonFile)
    with open(json_path, 'r', encoding='utf-8') as fh:
        return json.load(fh)


def read_point_name(filepath=None):
    """Return the point-number → description lookup dict loaded from JSON.

    :param filepath: explicit JSON path; defaults to
        ``static/point_name_dict.json`` under ``src_Dir``.
    :return: dict parsed from the JSON file.
    """
    target = filepath
    if target is None:
        target = os.path.join(src_Dir, 'static/point_name_dict.json')
    with open(target, 'r', encoding='utf8') as fh:
        return json.load(fh)


def refresh_database(df, chexing='F06'):
    """Replace the ``coorditemp_<chexing>`` table with *df*'s contents."""
    table = f'coorditemp_{chexing}'
    df.to_sql(table, engine, if_exists='replace')


def read_database(chexing='F06'):
    """Read ``coorditemp_<chexing>``, rebuilding it from Excel on failure.

    :param chexing: vehicle model code used in the table name.
    :return: DataFrame indexed by the stored 'index' column.
    """
    db_table_name = 'coorditemp_' + chexing
    try:
        return pd.read_sql_table(db_table_name, engine, index_col='index')
    except Exception:
        # Table missing or unreadable — rebuild it from the Excel source.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; now narrowed and logged.)
        db_logger.exception(f'读取数据表 {db_table_name} 失败，尝试从Excel重建')
        df, _ = read_excel_data()
        df.to_sql(db_table_name, engine, if_exists='replace')
        return pd.read_sql_table(db_table_name, engine, index_col='index')


def point_select(df_readsql, point_list=None, direction='X', vin_list=None):
    """Filter rows by point number and direction, returning chosen columns.

    :param df_readsql: DataFrame as produced by ``read_database()``.
    :param point_list: point numbers to keep; defaults to three KN points.
    :param direction: measurement axis, one of 'X'/'Y'/'Z'.
    :param vin_list: columns to return; defaults to the meta columns plus
        seven specific VINs.
    :return: the filtered DataFrame.
    """
    if point_list is None:
        point_list = ['KN0001L', 'KN0002L', 'KN0003L']
    if vin_list is None:
        vin_list = ['特征点号', '方向', '名义值'] + [
            'LNBMCUAK1LT108228', 'LNBMCUAKXLT107904', 'LNBMCUAK5LT106417',
            'LNBMCUAK2LT106164', 'LNBMCUAK0LT106096', 'LNBMCUAK9LT105982',
            'LNBMCUAK4LT104514',
        ]

    all_cols = df_readsql.columns
    # First column holds the point number, second the direction.
    row_mask = (df_readsql[all_cols[0]].isin(point_list)
                & (df_readsql[all_cols[1]] == direction))
    return df_readsql.loc[row_mask, vin_list]


def warning_point(header_cols=4, new_cols=3, old_cols=9):
    """Flag points whose latest values all fall outside the historic range.

    A row warns when every one of the ``new_cols`` most recent values lies
    outside the [min, max] range of the ``old_cols`` preceding values.
    Results are sorted by absolute mean shift (new mean vs. old mean).

    :param header_cols: number of leading metadata columns.
    :param new_cols: count of most-recent vehicles to check.
    :param old_cols: count of reference (older) vehicles.
    :return: warning rows, values rounded to 2 decimals.
    """
    df = read_database()
    cols = df.columns
    # Shorten VIN column names to their last 6 significant characters.
    df.columns = list(cols[:header_cols]) + [x[:17][-6:]
                                             for x in cols[header_cols:]]

    new_slice = slice(header_cols, header_cols + new_cols)
    old_slice = slice(header_cols + new_cols, header_cols + new_cols + old_cols)

    def _out_of_range_count(row):
        # How many of the new values lie outside the old values' range.
        # Generalized: the old code hard-coded exactly 3 new columns;
        # this honours the new_cols parameter (identical for the default).
        old_vals = row.iloc[old_slice]
        lo, hi = old_vals.min(), old_vals.max()
        return int(sum((v < lo) or (v > hi) for v in row.iloc[new_slice]))

    df.loc[:, 'warn'] = df.apply(_out_of_range_count, axis=1)

    # Keep only rows where every new value is out of range.
    # .copy() so the assignments below don't touch (or warn about) df.
    df_warn1 = df[df['warn'] == new_cols].copy()

    def _mean_shift(row):
        # Difference between the new-vehicle mean and the old-vehicle mean.
        return mean(row.iloc[new_slice]) - mean(row.iloc[old_slice])

    # sort_col drives ordering; 均值差异 is kept for display.
    df_warn1.loc[:, 'sort_col'] = df_warn1.apply(
        lambda r: abs(_mean_shift(r)), axis=1)
    df_warn1.loc[:, '均值差异'] = df_warn1.apply(_mean_shift, axis=1)

    df_warn2 = df_warn1.sort_values(by=['sort_col'], ascending=False)

    # Move 均值差异 next to the metadata; then drop the helper columns by
    # truncating to the displayed width.
    temp = df_warn2.pop('均值差异')
    df_warn2.insert(2, '均值差异', temp)
    df_warn = df_warn2.iloc[:, :(header_cols + new_cols + old_cols)]
    return df_warn.round(2)


if __name__ == '__main__':
    # Manual smoke run: read the Excel workbook and persist the VIN
    # mapping for model F06.
    df, colVin = read_excel_data()
    saveColumnVinDict('F06', colVin)
    print('hello ....')
    # Possible follow-ups (disabled):
    # refresh_database(df)
    # df2 = read_database()
