import pandas as pd
from clickhouse_driver import Client
import numpy as np
from datetime import timedelta
import datetime
import re
import random
import time
from jmr_resource import *

'''
   This script generates JMR test data.
'''

# Approximate number of rows per generated file
total_count = 40
# Directory the data files are written to
write_path = '/data2/jmr_data'
# When write_file_flag is True, write to files; when False, insert into ClickHouse
write_file_flag = False
# Number of files to generate
file_count = 1

client = Client(host='10.10.22.38', port=9100, user='default', password='d1234567', database='network_security')


def generate_random_string(i):
    """Return a pseudo-unique id string: '1' + a random event code + a timestamp.

    :param i: unused; kept so existing callers keep working.
    :return: str of the form "1<event_code>YYYYmmddHHMMSS<microseconds>".
    """
    # Bug fix: time.strftime() does not support the %f (microseconds) directive —
    # depending on the platform it raises ValueError or emits a literal '%f'.
    # datetime.strftime() supports %f, so build the timestamp from datetime.now().
    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
    return "1" + random.choice(event_list) + timestamp


# Custom helper producing a randomized timestamp (used via Series.apply).
def random_time(t, x, y):
    """Return a random timestamp roughly *x* days in the past.

    The result is "now - x days" shifted by a uniformly random offset of
    -540..599 minutes.  *t* (the Series value) and *y* are ignored; they only
    exist so the function fits the ``Series.apply(func, args=(i, 2))`` call.
    """
    minute_offset = int(np.random.randint(-540, 600))
    base = pd.Timestamp.now() - timedelta(days=x)
    return pd.to_datetime(base + pd.DateOffset(minutes=minute_offset))


# 读取数据
def read_data2df(sql):
    data, columns = client.execute(sql, columnar=True, with_column_types=True)
    df = pd.DataFrame({re.sub(r'\W', '_', col[0]): d for d, col in zip(data, columns)})
    return df


def get_type_dict(tb_name, field_list):
    """Return {column_name: clickhouse_type} for the requested columns of *tb_name*.

    Only columns that both exist in the table and appear in *field_list* are
    kept, in *field_list* order.

    :param tb_name: ClickHouse table name.
    :param field_list: iterable of column names to keep.
    :return: dict mapping column name -> ClickHouse type string.
    """
    # NOTE(review): the query has no database filter — if the same table name
    # exists in several databases, system.columns returns rows for all of
    # them; confirm that is acceptable for this environment.
    sql = f"select name, type from system.columns where table='{tb_name}';"
    type_dict = read_data2df(sql).set_index('name').to_dict('dict')['type']
    # Idiomatic replacement for the original manual filter loop.
    return {key: type_dict[key] for key in field_list if key in type_dict}


def loop_insert(df_extend, i, sql, tb_name):
    """Randomize the table's timestamp column(s), then insert the rows.

    :param df_extend: DataFrame holding the rows to insert (mutated in place).
    :param i: day offset passed through to random_time.
    :param sql: prebuilt "INSERT INTO ... VALUES" statement.
    :param tb_name: target table; decides which column(s) get random times.
    """
    # Which column(s) carry the event timestamp for each table; any table
    # not listed falls back to 'found_time'.
    time_columns = {
        'td_text_transmission_event': ('event_time',),
        'td_shezha_ip': ('lastest_found_time',),
        'td_block_log_reported': ('gather_time',),
        'td_shezha_app_monitor_detail_reported': ('access_time',),
        'td_iot_communication_event': ('start_time', 'end_time'),
    }
    for column in time_columns.get(tb_name, ('found_time',)):
        df_extend[column] = df_extend[column].apply(random_time, args=(i, 2))
    # clickhouse_driver expects a list of row dicts.
    records = df_extend.to_dict(orient='records')
    client.execute(sql, records)


def write2ch(df, tb_name, field_list):
    """Cast *df* to ClickHouse-compatible dtypes, enlarge it, and emit the rows.

    Depending on the module-level ``write_file_flag`` the enlarged data is
    either appended to '|'-separated CSV files under ``write_path`` or
    inserted into ClickHouse via :func:`loop_insert`.

    :param df: sample DataFrame (mutated in place by the dtype casts).
    :param tb_name: target table name.
    :param field_list: ordered column names used for the INSERT statement.
    :raises ValueError: if *df* is empty (nothing to enlarge).
    """
    type_dict = get_type_dict(tb_name, field_list)
    # Cast each column to a pandas dtype matching its ClickHouse type.
    for col_name, col_type in type_dict.items():
        if 'Date' in col_type:
            df[col_name] = pd.to_datetime(df[col_name])
        elif 'Int' in col_type:
            df[col_name] = df[col_name].astype('int')
        elif 'Float' in col_type:
            df[col_name] = df[col_name].astype('float')
        elif col_type == 'String':
            df[col_name] = df[col_name].astype('str')

    # Replicate the sample to approach the requested row count.
    nums = len(df)
    if nums == 0:
        raise ValueError('sample DataFrame is empty; nothing to enlarge')
    # Bug fix: when the sample already has more rows than total_count the
    # original int(total_count / nums) was 0 and pd.concat([]) raised
    # "No objects to concatenate" — always keep at least one copy.
    enlargement_factor = max(int(total_count / nums), 1)
    df_extend = pd.concat([df] * enlargement_factor, ignore_index=True)
    insert_sql = "INSERT INTO {} ({}) VALUES".format(tb_name, ','.join(field_list))
    # Generate data for multiple days (currently a single day).
    for i in range(0, 1):
        # Insert the same batch multiple times per day.
        for x in range(1, file_count + 1):
            start = pd.Timestamp.now()
            if write_file_flag:
                # Current timestamp keeps file names unique across runs.
                current_timestamp = datetime.datetime.now().timestamp()
                # Consistency fix: use the write_path constant instead of the
                # previously hard-coded '/data2/jmr_data' directory.
                if tb_name in ['td_text_transmission_event', 'td_shezha_ip', 'td_block_log_reported']:
                    df_extend.to_csv('{}/{}_{}_{}.csv.tmp'.format(
                        write_path, tb_name, current_timestamp, x), index=False, sep='|', header=False, mode='a')
                else:
                    # These tables are exported without the data_sources/platform columns.
                    df_extend.drop(columns=['data_sources', 'platform']).to_csv(
                        '{}/{}_{}_{}.csv.tmp'.format(write_path, tb_name, current_timestamp, x),
                        index=False, sep='|', header=False, mode='a')
            else:
                loop_insert(df_extend, i, insert_sql, tb_name)
            print(f"{tb_name} 生成第 {i + 1} 天数据 {x} 次, 共  {len(df_extend)} 行, 开始插入时间: {start}, 结束插入时间: {pd.Timestamp.now()}")


def get_schema(table):
    """Print and return the column names of *table* (debug helper).

    :param table: table name passed to DESCRIBE TABLE.
    :return: list of column name strings.
    """
    # DESCRIBE TABLE rows start with the column name.
    schema = client.execute("DESCRIBE TABLE {}".format(table))
    fields = [field[0] for field in schema]
    print(fields)
    # Improvement: also return the names so callers can use them
    # programmatically (the original only printed and returned None).
    return fields


def file2df(schema, file):
    """Load a '|'-separated sample file from the local ``data/`` directory.

    Adds a constant ``device_id`` column and replaces every NaN with 1.

    :param schema: list of column names for the header-less CSV.
    :param file: file name inside the ``data/`` directory.
    :return: cleaned DataFrame.
    """
    frame = pd.read_csv('data/{}'.format(file), sep='|', names=schema)
    # Every generated row reports the same device id.
    frame['device_id'] = '654022AC005'
    frame = frame.fillna(1)
    return frame


if __name__ == '__main__':

    # Map of target table -> its field list; uncomment entries to generate
    # data for those tables as well.
    fan_dict = {
        # 'td_host_control_event': td_host_control_event_fields,
        # 'td_network_attack_event': td_network_attack_event_fields,
        'td_harm_program_event': td_harm_program_event_fields,
        # 'td_text_transmission_event': td_text_transmission_event_fields,
        # 'td_shezha_ip': td_shezha_ip_fields,
        # 'td_block_log_reported': td_block_log_reported_fields,
        # 'td_iot_communication_event': td_iot_communication_event_fields,
        # 'td_shezha_app_monitor_detail_reported': td_shezha_app_monitor_detail_reported_fields
    }

    for key, files_list in fan_dict.items():
        # Print the key and the length of files_list
        print(f"Key: {key}, Fields List Length: {len(files_list)}")
        src_df = file2df(files_list, '{}.csv'.format(key))
        write2ch(src_df, key, files_list)
