# from icecream import ic
import json
from clickhouse_driver import Client
import pymysql
import pandas as pd
from pymysql.cursors import DictCursor

# with open('cfgs/input.json', 'r') as file:
#     config = json.load(file)

def gen_run_detail(rule_run_record_id, sql_info):
    """Fetch rule-run detail rows from MySQL for one run record.

    Args:
        rule_run_record_id: value bound to the WHERE rule_run_record_id
            clause of both queries.
        sql_info: nested config dict; MySQL credentials are read from
            sql_info['mysqlInfo']['mysql'] (host/port/user/password/db).

    Returns:
        (df_run_detail, df_run_detail_raw): two pandas DataFrames over
        {db}.rl_ruleset_data_scope_run_detail — the first filtered to
        rule_type = 'OPERATION', the second unfiltered; both ordered by
        wafer_id, rule_step_order. Either may be empty (columnless) if
        the query matches no rows.
    """
    mysql_cfg = sql_info['mysqlInfo']['mysql']
    mysql_conn = pymysql.connect(
        host=mysql_cfg['host'],
        port=mysql_cfg['port'],
        user=mysql_cfg['user'],
        password=mysql_cfg['password'],
    )
    data_base = mysql_cfg['db']
    try:
        # Bind the id as a parameter instead of interpolating it into the
        # SQL text (injection risk); the database/table name cannot be a
        # bind parameter, so it stays in the f-string.
        sql1 = f'''
                select *
                from {data_base}.rl_ruleset_data_scope_run_detail
                WHERE rule_run_record_id = %s
                and rule_type ='OPERATION'
                order by wafer_id ,rule_step_order;
                '''
        with mysql_conn.cursor(cursor=DictCursor) as cur:
            cur.execute(sql1, (rule_run_record_id,))
            df_run_detail = pd.DataFrame(cur.fetchall())
        sql2 = f'''
                select *
                from {data_base}.rl_ruleset_data_scope_run_detail
                WHERE rule_run_record_id = %s
                order by wafer_id ,rule_step_order;
                '''
        with mysql_conn.cursor(cursor=DictCursor) as cur:
            cur.execute(sql2, (rule_run_record_id,))
            df_run_detail_raw = pd.DataFrame(cur.fetchall())
    finally:
        # Previously the connection leaked when a query raised.
        mysql_conn.close()
    return df_run_detail, df_run_detail_raw

def gen_data_cluster(result_id, sql_info):
    """Fetch OUTPUT-type cluster rows for one result from ClickHouse.

    Args:
        result_id: value bound to the WHERE RESULT_ID clause.
        sql_info: nested config dict; ClickHouse credentials are read
            from sql_info['ckInfo']['onedata'] (host/port/user/password/db).

    Returns:
        pandas DataFrame of rows from {db}.rule_result_data_cluster with
        DATA_TYPE = 'OUTPUT'.
    """
    ck_cfg = sql_info['ckInfo']['onedata']
    ck_conn = Client(
        host=ck_cfg['host'],
        port=ck_cfg['port'],
        user=ck_cfg['user'],
        password=ck_cfg['password'])
    data_base = ck_cfg['db']
    try:
        # Bind result_id via clickhouse_driver's %(name)s parameters rather
        # than interpolating it into the SQL (injection risk). The database
        # name cannot be a bind parameter.
        sql = f'''
                select *
                from {data_base}.rule_result_data_cluster
                where RESULT_ID=%(result_id)s
                and DATA_TYPE='OUTPUT'
                '''
        df_data_cluster = ck_conn.query_dataframe(sql, params={'result_id': result_id})
    finally:
        # Previously the TCP connection was never released.
        ck_conn.disconnect()
    return df_data_cluster

def gen_virtual_name(virtual_id, sql_info):
    """Look up the one-to-one name mapping for a virtual dataset id.

    Args:
        virtual_id: primary-key value bound to the WHERE id clause of
            rl_rule_data_scope_virtual_dataset.
        sql_info: nested config dict; MySQL credentials are read from
            sql_info['mysqlInfo']['mysql'] (host/port/user/password/db).

    Returns:
        (name, input_data_type): the row's 'name' and 'input_data_type'
        columns. (The original locals were called ``input``/``output``,
        which shadowed the builtin and mislabeled the values.)

    Raises:
        LookupError: if no row exists for virtual_id (previously this
            surfaced as an opaque TypeError on the None result).
    """
    mysql_cfg = sql_info['mysqlInfo']['mysql']
    mysql_conn = pymysql.connect(
        host=mysql_cfg['host'],
        port=mysql_cfg['port'],
        user=mysql_cfg['user'],
        password=mysql_cfg['password'],
    )
    data_base = mysql_cfg['db']
    try:
        # Bind the id as a parameter instead of interpolating it (injection
        # risk); the database name cannot be a bind parameter.
        sql = f'''
                select *
                from {data_base}.rl_rule_data_scope_virtual_dataset
                where id=%s
                '''
        with mysql_conn.cursor(cursor=DictCursor) as cur:
            cur.execute(sql, (virtual_id,))
            res_dic = cur.fetchone()
    finally:
        # Previously the connection was never closed (leak).
        mysql_conn.close()
    if res_dic is None:
        raise LookupError(f'no virtual dataset row for id={virtual_id}')
    return res_dic['name'], res_dic['input_data_type']

if __name__ == '__main__':
    # Ad-hoc driver: load connection config from cfgs/input.json and run a
    # single gen_data_cluster query. 'config' avoids shadowing the builtin
    # 'input' (the original name); json is already imported at module level.
    with open('cfgs/input.json', 'r') as file:
        config = json.load(file)
    sql_info = {
        'mysqlInfo': config['mysqlInfo'],
        'ckInfo': config['ckInfo'],
    }
    rule_run_record_id = int(config['recordId'])
    re_file_path = config['fileFullPath']
    # NOTE(review): hard-coded test id — presumably a known result row used
    # for manual verification; confirm before relying on it.
    result_id = 101668
    # df_run_detail = gen_run_detail(rule_run_record_id, sql_info)
    gen_data_cluster(result_id, sql_info)
    # virtual_id = 514
    # name = gen_virtual_name(virtual_id, sql_info)
    # print(name)



