from apps.logger_config import logger
import tempfile

import pandas as pd
import pymysql
import numpy as np
import requests
from pandas import DataFrame


# Build a database connection
def build_sql_connect():
    """Create and return a new pymysql connection to the lingmerge database.

    Settings may be overridden through the LINGMERGE_DB_* environment
    variables; the hard-coded values remain as defaults so existing
    deployments keep working.

    SECURITY NOTE(review): production credentials are committed to source
    control here — they should be rotated and supplied via environment
    variables / a secret store only.
    """
    import os  # local import: keeps the module-level import block untouched

    conn = pymysql.connect(
        host=os.environ.get('LINGMERGE_DB_HOST', 'rm-2zev85w9uvzpiow15.mysql.rds.aliyuncs.com'),
        port=int(os.environ.get('LINGMERGE_DB_PORT', '3306')),
        user=os.environ.get('LINGMERGE_DB_USER', 'lingxun'),
        passwd=os.environ.get('LINGMERGE_DB_PASSWORD', 'Sunqiuling591'),
        db=os.environ.get('LINGMERGE_DB_NAME', 'lingmerge'),
        charset='utf8mb4',
    )
    return conn


# Get all domain names in a project
def get_all_domain(project_id):
    """Return the distinct domain names configured for *project_id*.

    Empty supplemental (SUPP--) datasets — those whose variables are exactly
    the nine standard supplemental-qualifier columns — are filtered out.
    Returns None when the query fails (the error is logged).
    """
    conn = build_sql_connect()
    # Parameterized query: never interpolate caller input into SQL text.
    sql = "select distinct domain from var_setting where project_id = %s"
    try:
        data = pd.read_sql(sql, conn, params=(project_id,))
        domain_lst: list[str] = data['domain'].tolist()

        # Drop SUPP-- datasets that carry nothing beyond the standard
        # supplemental-qualifier variables.
        supp_var_list = ['STUDYID', 'RDOMAIN', 'IDVAR', 'IDVARVAL', 'QNAM', 'QLABEL', 'QVAL', 'QORIG', 'QEVAL']
        for domain in list(domain_lst):  # iterate a copy while removing from the original
            if domain.startswith("SUPP"):
                all_variable_info = get_all_variable_info(project_id, domain)
                var_list = list(all_variable_info['variable'])
                if set(supp_var_list) == set(var_list):
                    domain_lst.remove(domain)
        return domain_lst
    except Exception as error:
        logger.error(f"查询所有域名失败: {error}")
    finally:
        # Always release the connection, success or failure.
        conn.close()


# Get info for every variable in one domain
def get_all_variable_info(project_id, domain):
    """Return a DataFrame with all var_setting rows for (project_id, domain).

    Returns None when the query fails (the error is logged).
    """
    conn = build_sql_connect()
    # Parameterized query to avoid SQL injection.
    sql = "select * from var_setting where project_id = %s and domain = %s"
    try:
        # read_sql forwards params to the DB-API cursor; no commit is
        # needed for a plain SELECT.
        return pd.read_sql(sql, conn, params=(project_id, domain))
    except Exception as error:
        logger.error(f"获取一个域中所有变量的信息失败: {error}")
    finally:
        conn.close()


# Get info for every parent (standard) variable in a domain
def get_standard_variable_info(project_id, domain):
    """Return the var_setting rows that are parent variables (parent_id = -1).

    Returns None when the query fails (the error is logged).
    """
    conn = build_sql_connect()
    # Only top-level variables; parameterized to avoid SQL injection.
    sql = ("select * from var_setting "
           "where project_id = %s and domain = %s and parent_id = -1")
    try:
        return pd.read_sql(sql, conn, params=(project_id, domain))
    except Exception as error:
        logger.error(f"获取域中所有父变量的信息失败: {error}")
    finally:
        conn.close()


# Get the single var_setting row for one variable
def get_var_info(project_id, domain, variable) -> DataFrame:
    """Return the var_setting row(s) matching (project_id, domain, variable).

    Returns None when the query fails (the error is logged).
    """
    conn = build_sql_connect()
    # Parameterized query to avoid SQL injection.
    sql = ("select * from var_setting "
           "where project_id = %s and domain = %s and variable = %s")
    try:
        return pd.read_sql(sql, conn, params=(project_id, domain, variable))
    except Exception as error:
        logger.error(f"查询变量信息失败: {error}")
    finally:
        conn.close()


# Get value-level metadata
def get_value_list(project_id, domain, variable) -> DataFrame:
    """Return the value-level metadata rows (children) of *variable*.

    Looks up the variable's var_id, then selects every row whose parent_id
    equals it. Returns None when the child query fails (the error is logged).
    """
    # Resolve the parent variable's id first; this uses its own connection.
    # (The old code opened this function's connection before these lines,
    # leaking it when the lookup raised outside the try block.)
    var_info = get_var_info(project_id, domain, variable)
    var_id = int(var_info['var_id'].iloc[0])

    conn = build_sql_connect()
    # Parameterized query to avoid SQL injection.
    sql = ("select * from var_setting "
           "where project_id = %s and domain = %s and parent_id = %s")
    try:
        return pd.read_sql(sql, conn, params=(project_id, domain, var_id))
    except Exception as error:
        logger.error(f"查询值级元数据失败: {error}")
    finally:
        conn.close()


# Get the single domain_info row for one domain
def get_domain_info(project_id, domain):
    """Return the domain_info row(s) matching (project_id, domain).

    Returns None when the query fails (the error is logged).
    """
    conn = build_sql_connect()
    # Parameterized query to avoid SQL injection.
    sql = "select * from domain_info where project_id = %s and domain = %s"
    try:
        return pd.read_sql(sql, conn, params=(project_id, domain))
    except Exception as error:
        logger.error(f"查询域变量信息失败: {error}")
    finally:
        conn.close()


# Get the key variables of a domain
def get_key_variable(project_id: str, domain: str) -> list:
    """Return the key variables of *domain*, priority-ordered.

    STUDYID is moved first, USUBJID second and <domain>TESTCD third when
    they are present; remaining key variables follow in query order.
    Returns None when the query fails (the error is logged).
    """
    conn = build_sql_connect()
    # Parameterized query to avoid SQL injection.
    sql = ("select variable from var_setting "
           "where project_id = %s and domain = %s and is_key_var = 1")
    try:
        data = pd.read_sql(sql, conn, params=(project_id, domain))
        key_variable_lst = data['variable'].tolist()
        # Pull the priority variables to the front, skipping any that are
        # absent. (The previous swap logic called .index('STUDYID') /
        # .index('USUBJID') unconditionally and raised ValueError whenever
        # a later priority variable was present without an earlier one.)
        insert_at = 0
        for target in ('STUDYID', 'USUBJID', domain + 'TESTCD'):
            if target in key_variable_lst:
                key_variable_lst.remove(target)
                key_variable_lst.insert(insert_at, target)
                insert_at += 1
        return key_variable_lst
    except Exception as error:
        logger.error(f"查询关键变量信息失败: {error}")
    finally:
        conn.close()


# Get the raw dataset name from its stored path
def get_dataset_name(project_id: str) -> str:
    """Return the raw dataset name parsed out of project_info.dataset.

    NOTE(review): the column apparently stores comma-separated paths; this
    takes the second entry's fourth '/'-segment — confirm that format with
    whoever writes project_info. A malformed value raises IndexError, which
    is caught, logged, and turned into a None return.
    """
    conn = build_sql_connect()
    # Parameterized query to avoid SQL injection.
    sql = "select dataset from project_info where project_id = %s"
    try:
        data = pd.read_sql(sql, conn, params=(project_id,))
        path = str(data.iloc[0, 0])
        # Fragile by design of the stored format — see NOTE above.
        return path.split(',')[1].split('/')[3]
    except Exception as error:
        logger.error(f"获取原始数据集名字失败: {error}")
    finally:
        conn.close()


# Merge the DataFrame dicts of all datasets into one dict {sheet_name: DataFrame}
def get_combined_dataframe(project_id):
    """Download every raw dataset of the project and combine all Excel sheets
    into a single dict mapping sheet name -> DataFrame.

    Only sheets whose first data row has a non-empty DOMAIN value are kept;
    sheets whose DOMAIN lists several domains (comma-separated) are skipped.
    Each kept sheet gets a default <domain>CAT column holding the sheet name.
    Duplicate sheet names are disambiguated with numeric suffixes.
    """
    all_dfs = {}

    # {local_path: header-row count} for every dataset file
    dataset_paths = get_dataset_local_path(project_id)

    for path, head_count in dataset_paths.items():
        # Read every sheet of the Excel file; with two header rows, skip
        # the second so row 0 is real data.
        if head_count == 2:
            dfs = pd.read_excel(path, sheet_name=None, index_col=None,
                                dtype={'SITEID': str, 'SUBJID': str}, skiprows=[1])
        else:
            dfs = pd.read_excel(path, sheet_name=None, index_col=None, dtype={'SITEID': str, 'SUBJID': str})

        for sheet_name, df in dfs.items():
            # Keep only sheets whose first data row names a DOMAIN.
            if 'DOMAIN' in df.columns.tolist() and (0 in df.index) and pd.notna(df.loc[0, 'DOMAIN']):
                domain = df.loc[0, 'DOMAIN']
                # A comma means the sheet belongs to multiple domains —
                # not supported yet, so skip it.
                if ',' not in domain:
                    # Default-fill the --CAT column with the sheet name.
                    df[domain + "CAT"] = sheet_name
                    # Disambiguate duplicate sheet names as name_0, name_1, ...
                    # (The old loop reset its counter inside `while True`,
                    # producing name_0, name_0_0, name_0_0_0 instead.)
                    unique_name = sheet_name
                    suffix = 0
                    while unique_name in all_dfs:
                        unique_name = f"{sheet_name}_{suffix}"
                        suffix += 1
                    all_dfs[unique_name] = df
    return all_dfs


# Fetch raw-dataset URLs and header counts, downloading each file locally
# Returns: {path: head_count}
def get_dataset_local_path(project_id: str) -> dict[str, int]:
    """Download every active (flag = 0) dataset file of the project.

    Returns a dict mapping each downloaded temp-file path to the number of
    header rows in that file; None when the query or a download fails
    (the error is logged).
    """
    conn = build_sql_connect()
    # Parameterized query to avoid SQL injection.
    sql = "select url , head_count from dataset_file where `project_id` = %s and `flag` = 0 "
    try:
        data = pd.read_sql(sql, conn, params=(project_id,))

        # url -> head_count, straight from the query result
        url_to_head_count = data.set_index('url')['head_count'].to_dict()
        dataset_path = {}
        for url, head_count in url_to_head_count.items():
            temp_file_path = download_oss_file_to_temp_file(url)
            dataset_path[temp_file_path] = head_count
        return dataset_path
    except Exception as error:
        logger.error(f"下载数据集失败: {error}")
    finally:
        conn.close()


# Download into a temporary file created with tempfile
def download_oss_file_to_temp_file(url):
    """Download *url* into a temporary .xlsx file and return its path.

    delete=False keeps the file on disk after the handle closes; the caller
    is responsible for eventually removing it.

    Raises requests.HTTPError on a non-2xx response. (The old version only
    printed the status and still returned the path of an empty .xlsx, which
    made the later read_excel fail far from the real cause.)
    """
    # Fail fast on network problems instead of hanging indefinitely.
    response = requests.get(url, timeout=60)
    response.raise_for_status()
    with tempfile.NamedTemporaryFile(delete=False, suffix=".xlsx") as temp_file:
        temp_file.write(response.content)
    # Use the module logger rather than print for consistency.
    logger.info(f"File downloaded to {temp_file.name}")
    return temp_file.name


# Get all SDTM variables
def get_all_sdtm_variable() -> list:
    """Return every variable name in the `sdtm_var_3.3` table, ordered by seq.

    Returns None when the query fails (the error is logged).
    """
    conn = build_sql_connect()
    # No caller input in this statement, so plain SQL is fine here.
    sql = "select variable from `sdtm_var_3.3` order by seq"
    try:
        data = pd.read_sql(sql, conn)
        # Series.tolist() replaces the old manual index loop.
        return data['variable'].tolist()
    except Exception as error:
        logger.error(f"获取所有SDTM变量失败: {error}")
    finally:
        conn.close()


if __name__ == '__main__':
    # Ad-hoc manual check: dump every SDTM variable known to the database.
    sdtm_variables = get_all_sdtm_variable()
    print(sdtm_variables)
