import pandas as pd
from tqdm import tqdm
import re
from sql_metadata import Parser
import seaborn as sns
import matplotlib.pyplot as plt
from sql_analysis.code.utils.funcs_classification import funcs_classes_dict, funcs_classes_mean

# Configure matplotlib so CJK (Chinese) axis labels/titles render correctly
# plt.rcParams['font.sans-serif'] = ['SimHei']  # alternative CJK font (typically Windows)
plt.rcParams['font.sans-serif'] = ['Heiti TC']  # CJK font — presumably macOS; confirm on target machine
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign from rendering as a box with CJK fonts

# CSV listing known MySQL operators/functions; must contain a 'func' column
MYSQL_FUNCS_LIST_PATH = "./utils/funcs/mysql_funcs_list.csv"


def is_tables_columns(tables_columns_names, token):
    """Return True when *token* refers to a table or column rather than a function.

    A token counts as a table/column reference if it appears in the list of
    known table/column/alias names, or if it contains a dot (qualified name
    such as ``table.column``).
    """
    return token in tables_columns_names or '.' in token


def is_hanzi(token):
    """Return True when *token* is non-empty and made solely of CJK ideographs (U+4E00–U+9FFF)."""
    return bool(token) and all('\u4e00' <= ch <= '\u9fff' for ch in token)


def is_in_funcs(token, funcs_list):
    """Return True when *token* matches a known function name, case-insensitively.

    A match is either the name itself (e.g. ``ABS``) or the name as listed
    with a trailing ``()`` in the reference CSV (e.g. token ``COUNT`` matches
    entry ``COUNT()``).

    Parameters
    ----------
    token : str
        Candidate token (callers pass it already upper-cased, but matching is
        case-insensitive regardless).
    funcs_list : iterable of str
        Known function names loaded from MYSQL_FUNCS_LIST_PATH.
    """
    # Hoist token.upper() out of the loop — the original recomputed it twice
    # per iteration and also did a redundant exact-match membership test first
    # (any exact match is subsumed by the case-insensitive comparison below).
    upper = token.upper()
    return any(upper == func.upper() or upper + "()" == func.upper()
               for func in funcs_list)


def get_sqls_funcs(sql_path):
    """Count MySQL operator/function occurrences in each SQL statement of a CSV.

    Parameters
    ----------
    sql_path : str
        Path to a CSV with a 'SQL语句' column holding SQL statements.

    Returns
    -------
    pd.DataFrame
        One row per successfully parsed statement. A 'sql' column holds the
        statement text; the remaining columns are per-function occurrence
        counts. Function columns that are zero for every statement are dropped.
    """
    funcs_list = pd.read_csv(MYSQL_FUNCS_LIST_PATH)['func'].values
    sqls = pd.read_csv(sql_path)
    rows = []          # one {func: count} dict per parsed statement
    parsed_sqls = []   # statements that sql_metadata could parse
    failed_parsed_num = 0
    for sql in tqdm(sqls['SQL语句'].values):
        one_sql_func_count = {key: 0 for key in funcs_list}
        res = Parser(sql)
        try:
            res_tokens = res.tokens
            # Names that look like function tokens but are actually
            # tables / columns / aliases — used to exclude false positives.
            tables_columns_names = (res.columns + res.tables
                                    + res.columns_aliases_names
                                    + list(res.tables_aliases.keys())
                                    + list(res.tables_aliases.values()))
        except Exception:
            # sql_metadata raises assorted errors on unparsable SQL; the
            # original used a bare `except:` which would also swallow
            # KeyboardInterrupt/SystemExit.
            failed_parsed_num += 1
            continue

        for token in res_tokens:
            token_value = token.value.upper()
            if (is_in_funcs(token_value, funcs_list)
                    and not is_tables_columns(tables_columns_names, token_value)
                    and not is_hanzi(token_value)):
                if token_value in funcs_list:
                    one_sql_func_count[token_value] += 1
                elif token_value + "()" in funcs_list:
                    one_sql_func_count[token_value + "()"] += 1
        parsed_sqls.append(sql)
        rows.append(one_sql_func_count)

    # Build the frame once — the original pd.concat inside the loop copies the
    # whole frame every iteration (quadratic in the number of statements).
    sqls_funcs = pd.DataFrame(rows, columns=funcs_list)
    # Attach the statement text as an extra column.
    sqls_funcs['sql'] = parsed_sqls
    # Drop functions that never occur; the 'sql' column survives because its
    # string values compare != 0. (The original also built an unused
    # reordered copy here, which has been removed.)
    sqls_funcs = sqls_funcs.loc[:, (sqls_funcs != 0).any(axis=0)]

    print("数据集共有{}个SQL语句，其中{}个SQL语句无法解析。".format(sqls.shape[0], failed_parsed_num))
    return sqls_funcs


def get_class(token):
    """Map a function/operator name to its human-readable category label.

    Looks *token* up in funcs_classes_dict and translates the matching
    category key via funcs_classes_mean; returns None when no category
    contains the token.
    """
    category = next(
        (key for key, members in funcs_classes_dict.items() if token in members),
        None,
    )
    return funcs_classes_mean[category] if category is not None else None


def visualization(df, figure_save_path):
    """Plot the share of SQL statements that use each operator/function category.

    Aggregates the per-function counts in *df* into the broad categories from
    funcs_classes_dict, computes the fraction of statements that use each
    category at least once, and saves/shows a labelled bar chart.

    Parameters
    ----------
    df : pd.DataFrame
        Output of get_sqls_funcs: a 'sql' column plus per-function counts.
        Not mutated (the original dropped 'sql' with inplace=True, silently
        altering the caller's frame).
    figure_save_path : str
        Destination PNG path.
    """
    counts = df.drop(columns=['sql'])
    # .copy() so assigning category columns below does not hit pandas'
    # SettingWithCopy behavior on a sliced view.
    res = df[['sql']].copy()
    for key, members in funcs_classes_dict.items():
        # Sum counts over the category's functions that actually appear as
        # columns. Indexing must use a list — set indexing is rejected by
        # modern pandas.
        present = list(set(members).intersection(counts.columns))
        res[key] = counts[present].sum(axis=1)
    res.rename(columns=funcs_classes_mean, inplace=True)
    # Per category: fraction of statements with at least one occurrence.
    fenbu = res.drop(['sql'], axis=1).apply(lambda x: (x > 0).sum()) / res.shape[0]

    # Single figure — the original opened a throwaway 15x6 figure first.
    plt.figure(figsize=(10, 6))
    ax = sns.barplot(x=fenbu.index, y=fenbu.values, palette='bright')
    # Label each bar with its percentage (two decimals), slightly above the bar.
    for p in ax.patches:
        height = p.get_height()
        ax.annotate(f'{height:.2%}', (p.get_x() + p.get_width() / 2., height),
                    ha='center', va='center', fontsize=12, color='black', xytext=(0, 5),
                    textcoords='offset points')
    plt.xticks(rotation=45)
    plt.xlabel('运算符/函数类型')
    plt.ylabel('sql个数占比')
    plt.title('不同类型运算符的占比')
    plt.tight_layout()
    plt.savefig(figure_save_path, format='png', dpi=300)
    plt.show()


def get_funcs_distribution(sql_path, distribution_save_path, figure_save_path):
    """Run the end-to-end analysis: parse SQLs, save a stats CSV and a bar chart.

    Parameters
    ----------
    sql_path : str
        CSV of SQL statements (must contain a 'SQL语句' column).
    distribution_save_path : str
        Output CSV path for the per-function statistics table.
    figure_save_path : str
        Output PNG path for the per-category bar chart.
    """
    sqls_funcs = get_sqls_funcs(sql_path=sql_path)
    # Drop the X()/Y() pseudo-function columns when present. errors='ignore'
    # replaces the original bare try/except (which also skipped both columns
    # if only one was missing).
    sqls_funcs = sqls_funcs.drop(columns=['X()', 'Y()'], errors='ignore')
    df = sqls_funcs.drop(['sql'], axis=1)
    # Vectorized column stats instead of apply(lambda ...) with Python sum/max.
    non_zero_counts = (df != 0).sum()
    non_zero_ratio = (df != 0).mean()
    total_counts = df.sum()
    max_counts = df.max()

    # Assemble the stats table (one row per function/operator).
    result_df = pd.DataFrame({
        '含有该运算符/函数的sql个数': non_zero_counts,
        '含有该运算符/函数的sql个数占比': non_zero_ratio,
        '所有sql中，该运算符/函数出现的总频次': total_counts,
        '一个sql中，使用了该运算符/函数的最大频次': max_counts
    })
    result_df["包含的运算符/函数"] = result_df.index
    result_df['运算符/函数大类类别'] = result_df['包含的运算符/函数'].apply(get_class)
    result_df = result_df.sort_values(by='运算符/函数大类类别', ascending=True)
    ordered_cols = ["运算符/函数大类类别", "包含的运算符/函数", '含有该运算符/函数的sql个数',
                    "含有该运算符/函数的sql个数占比", "所有sql中，该运算符/函数出现的总频次",
                    "一个sql中，使用了该运算符/函数的最大频次"]
    result_df[ordered_cols].to_csv(distribution_save_path, index=False)

    # Visualize the per-category distribution.
    visualization(sqls_funcs, figure_save_path)


if __name__ == "__main__":
    # (sql CSV, output distribution CSV, output figure PNG) per dataset.
    _datasets = [
        ("../data/电力/单表问答对.csv",
         "../data/电力/electricity_sql_func_distributions.csv",
         "../data/电力/electricity_sql_func_distribution.png"),
        ("../data/浪潮/问答对.csv",
         "../data/浪潮/inspur_sql_func_distributions.csv",
         "../data/浪潮/inspur_sql_func_distribution.png"),
    ]
    for _sql_csv, _dist_csv, _fig_png in _datasets:
        get_funcs_distribution(_sql_csv, _dist_csv, _fig_png)
