# -*- coding: utf-8 -*-
'''
@Time    : 24-5-24 下午15:37
@Author  : zhangweihao
@File    : tasks.py
'''
import json
import logging
import sys
sys.path.append('/home/tutu/公司/medical_backend/glbx_backend')
from celery_task.main import celery
import joblib
from tqdm import tqdm
import pandas as pd
import numpy as np
import time
import chinese_calendar
from sklearn.tree import DecisionTreeRegressor,DecisionTreeClassifier,ExtraTreeRegressor
import pandas as pd
import psycopg2
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from configs.config import Configs
from sqlalchemy import func, and_
from model.auto_model.abnormal_check import AbnormalPrivacyAiModelModel,AbnormalPrivacyDataModel
from model._base import DataConn
from collections import Counter

# Silence pandas' SettingWithCopyWarning — the handlers below intentionally
# mutate filtered frames in place.
pd.options.mode.chained_assignment = None
# Count each source IP's send volume per hour
def handle_data(origin_data:pd.DataFrame, filter_col=None, filter=None, save=False) -> pd.DataFrame:
    """Build the labelled hourly-count training set for the count-based models.

    Groups rows by (destination_ip, date, hour, holiday flag, disease flag),
    counts them, strips per-bucket outliers and appends synthetic anomalies
    via handle_def_data, then splits the IP into four numeric feature columns.

    Args:
        origin_data: raw privacy rows; must contain 'time' and 'destination_ip'
            (plus `filter_col` when filtering is requested).
        filter_col: optional column to restrict on (e.g. 'level').
        filter: accepted values for `filter_col`; both must be truthy for the
            restriction to apply. (Name shadows the builtin, kept for
            caller compatibility.)
        save: when True, also write the result to a CSV text file.

    Returns:
        DataFrame with columns ip1..ip4, hour_range, is_holiday, is_disease,
        count, label — or an empty DataFrame when nothing survives.
    """
    save_name = './hour_source_data.txt'
    if filter and filter_col:
        origin_data = origin_data[origin_data[filter_col].isin(filter)]
        save_name = f'./hour_source_data_{filter_col}.txt'
    origin_data['time'] = pd.to_datetime(origin_data['time'])
    origin_data['date'] = origin_data['time'].dt.date
    origin_data['is_holiday'] = origin_data['date'].apply(lambda x: 1 if chinese_calendar.is_holiday(x) else 0)
    origin_data['hour_range'] = origin_data['time'].dt.hour
    origin_data['is_disease'] = 0
    hour_source_data = origin_data.groupby(['destination_ip', 'date', 'hour_range', 'is_holiday', 'is_disease']).size().\
        reset_index(name='count').sort_values(['destination_ip', 'hour_range'])
    hour_source_data = hour_source_data.drop('date', axis=1)
    # Box-plot style outlier removal, one (ip, hour, holiday, disease) bucket at a time.
    df = hour_source_data
    ip_list = list(set(df['destination_ip'].values.tolist()))
    final_df = pd.DataFrame()
    type_ = 'count'
    for ip in tqdm(ip_list, total=len(ip_list), desc='处理数据'):
        for hour in range(24):
            # Previously eight copy-pasted calls; same combinations, generated.
            for is_holiday in (0, 1):
                for is_disease in range(4):
                    final_df = handle_def_data(df, final_df, ip, hour, is_holiday, is_disease, type_)
    if final_df.empty:
        return pd.DataFrame()
    final_df = final_df.reset_index().drop('index', axis=1)
    final_df[['ip1', 'ip2', 'ip3', 'ip4']] = final_df['destination_ip'].str.split('.', expand=True)
    final_df = final_df.drop('destination_ip', axis=1)
    # Fixed feature column order expected by the model.
    final_df = final_df[['ip1', 'ip2', 'ip3', 'ip4', 'hour_range', 'is_holiday', 'is_disease', 'count','label']]
    if save:
        final_df.to_csv(save_name, sep=',', index=False)
    return final_df

def handle_data_size(origin_data:pd.DataFrame, save=False) -> pd.DataFrame:
    """Build the labelled hourly-traffic (KB) training set for the size model.

    Same pipeline as handle_data, but aggregates the summed data_size per
    (destination_ip, date, hour, holiday, disease) bucket instead of a row
    count, converting bytes to kilobytes.

    Args:
        origin_data: raw rows with 'time', 'destination_ip' and 'data_size'.
        save: when True, also write the result to ./hour_source_data_size.txt.

    Returns:
        DataFrame with ip1..ip4, hour_range, is_holiday, is_disease,
        data_size, label — or an empty DataFrame when nothing survives.
    """
    save_name = './hour_source_data_size.txt'
    origin_data['time'] = pd.to_datetime(origin_data['time'])
    origin_data['date'] = origin_data['time'].dt.date
    origin_data['is_holiday'] = origin_data['date'].apply(lambda x: 1 if chinese_calendar.is_holiday(x) else 0)
    origin_data['hour_range'] = origin_data['time'].dt.hour
    origin_data['is_disease'] = 0
    hour_source_data = origin_data.groupby(['destination_ip', 'date', 'hour_range', 'is_holiday', 'is_disease']).agg({
    'data_size': 'sum',  # total bytes sent in the bucket
            }).reset_index().sort_values(['destination_ip', 'hour_range'])
    hour_source_data = hour_source_data.drop('date', axis=1)
    # Bytes -> kilobytes, two decimals.
    hour_source_data['data_size'] = round(hour_source_data['data_size'] / 1024, 2)
    # Box-plot style outlier removal, one (ip, hour, holiday, disease) bucket at a time.
    df = hour_source_data
    ip_list = list(set(df['destination_ip'].values.tolist()))
    final_df = pd.DataFrame()
    type_ = 'data_size'
    for ip in tqdm(ip_list, total=len(ip_list), desc='处理数据'):
        for hour in range(24):
            # Previously eight copy-pasted calls plus a large block of dead
            # commented-out threading code (removed); same combinations, generated.
            for is_holiday in (0, 1):
                for is_disease in range(4):
                    final_df = handle_def_data(df, final_df, ip, hour, is_holiday, is_disease, type_)
    if final_df.empty:
        return pd.DataFrame()
    final_df = final_df.reset_index().drop('index', axis=1)
    final_df[['ip1', 'ip2', 'ip3', 'ip4']] = final_df['destination_ip'].str.split('.', expand=True)
    final_df = final_df.drop('destination_ip', axis=1)
    # Fixed feature column order expected by the model.
    final_df = final_df[['ip1', 'ip2', 'ip3', 'ip4', 'hour_range', 'is_holiday', 'is_disease', 'data_size','label']]
    if save:
        final_df.to_csv(save_name, sep=',', index=False)
    return final_df

def pretreat(lis:list):
    """Return the box-plot outer fence (Q1 - 3*IQR, Q3 + 3*IQR) for *lis*.

    Returns None when the list is empty.
    """
    if not lis:
        return None
    # Both quartiles in one percentile call.
    q1, q3 = np.percentile(lis, [25, 75])
    iqr = q3 - q1
    return (q1 - 3 * iqr, q3 + 3 * iqr)

def create_error_data(df:pd.DataFrame,type_):
    """Turn a copy of normal rows into synthetic anomaly rows.

    Each row's value column *type_* is replaced with 1.2x the column maximum
    scaled by an escalating multiplier, and every row is labelled 1.

    Args:
        df: bucket of rows to overwrite (mutated and returned).
        type_: name of the value column ('count' or 'data_size').
    """
    max_data_size = df[type_].max()
    multiple_list = [1.5,2,5,10,15,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200,210,220,230,240,250,260,270,280,290,300]
    # Bug fix: indexing multiple_list[i] raised IndexError for buckets with
    # more rows than multipliers (>34); clamp so extra rows reuse the largest.
    last = len(multiple_list) - 1
    error_list = [
        round(max_data_size * 1.2 * multiple_list[min(i, last)], 2)
        for i in range(len(df))
    ]
    df[type_] = error_list
    df['label'] = 1  # mark these rows as anomalous
    return df

def handle_def_data(df,final_df,ip,hour,is_holiday,is_disease,type_,result_list=None):
    """Process one (ip, hour, is_holiday, is_disease) bucket.

    Drops outliers outside the outer fence, labels the survivors 0, appends a
    synthetic anomalous copy (label 1) built by create_error_data, and returns
    final_df with the labelled bucket concatenated. Buckets whose fence cannot
    be computed (empty) leave final_df untouched.
    """
    mask = (
        (df['destination_ip'] == ip)
        & (df['hour_range'] == hour)
        & (df['is_holiday'] == is_holiday)
        & (df['is_disease'] == is_disease)
    )
    bucket = df[mask]
    fence = pretreat(bucket[type_].tolist())
    if not fence:
        return final_df
    low, high = fence
    bucket = bucket[(bucket[type_] >= low) & (bucket[type_] <= high)]
    anomalies = bucket.copy()
    bucket['label'] = 0  # surviving rows are normal
    anomalies = create_error_data(anomalies, type_)
    labelled = pd.concat([bucket, anomalies])
    return pd.concat([final_df, labelled]) if not final_df.empty else labelled

def train_model(df:pd.DataFrame,type_):
    """Fit a decision-tree classifier on *df* and persist it with joblib.

    All columns except 'label' are used as features. Returns the path of the
    saved model file under Configs().AI_STATIC_PATH.
    """
    labels = df['label']
    features = df.drop('label', axis=1)

    classifier = DecisionTreeClassifier()

    started = time.time()
    classifier.fit(features, labels)
    elapsed = time.time() - started
    logging.warning(f'{type_}训练完成,耗时{elapsed}s')
    model_filename = Configs().AI_STATIC_PATH + f'/{type_}_model.joblib'
    joblib.dump(classifier, model_filename)
    return model_filename

def model_predict(data,type_):
    """Load the persisted model for *type_* and return its predictions.

    Bug fix: the model is loaded from the same AI_STATIC_PATH location that
    train_model saves to; previously a bare relative filename was used, so
    loading failed unless the process happened to run from that directory.
    """
    model_filename = Configs().AI_STATIC_PATH + f'/{type_}_model.joblib'
    model = joblib.load(model_filename)
    return model.predict(data)

class Get_30days_data():
    """Fetch the last month of privacy-data rows from per-day Postgres tables
    named privacy_dataYYYYMMDD and optionally dump them to a text file."""

    def __init__(self):
        # Connection and cursor are opened lazily by sql_connect().
        self.conn = None
        self.cur = None

    def sql_connect(self):
        """Open a Postgres connection and cursor; on any failure both stay None."""
        try:
            self.conn = psycopg2.connect(
                dbname=Configs().DB_NAME,
                user=Configs().DB_USER,
                password=Configs().DB_PASSWORD,
                host=Configs().DB_IP,
                port=Configs().DB_PORT
                )
            self.cur = self.conn.cursor()
        except Exception:
            # Narrowed from a bare except; callers detect failure via None.
            self.conn = None
            self.cur = None

    def sql_close(self):
        """Release the cursor and connection, always resetting both handles.

        Bug fix: the original closed the connection before the cursor, so the
        cursor close could raise and be silently swallowed; close cursor first.
        """
        try:
            if self.cur is not None:
                self.cur.close()
            if self.conn is not None:
                self.conn.close()
        except Exception:
            pass
        finally:
            self.conn = None
            self.cur = None

    def get_date(self):
        """Return candidate table names for each day of the past month,
        newest first (today inclusive, one-month-ago exclusive)."""
        today = datetime.today()
        month_ago = today - relativedelta(months=1)

        date_list = []
        current_date = today
        while current_date > month_ago:
            date_list.append('privacy_data' + current_date.strftime('%Y%m%d'))
            current_date -= timedelta(days=1)

        return tuple(date_list)

    def get_tables(self, date_list: tuple):
        """Return the subset of *date_list* tables that actually exist in the DB."""
        sql_1 = """
            SELECT table_name
            FROM information_schema.tables
            WHERE table_name LIKE 'privacy_data%'
        """
        self.cur.execute(sql_1)
        self.conn.commit()
        rows = self.cur.fetchall()

        if not rows:
            return []
        wanted = set(date_list)  # O(1) membership instead of scanning the tuple
        return tuple(row[0] for row in rows if row[0] in wanted)

    def get_30days_data(self):
        """UNION ALL the existing daily tables and return them as a DataFrame."""
        self.sql_connect()
        table_names = self.get_tables(self.get_date())
        if not table_names:
            # Bug fix: the connection used to leak on this early return.
            self.sql_close()
            return pd.DataFrame()
        # Table names come from information_schema intersected with our own
        # generated list, so f-string interpolation is safe here.
        union_query = " UNION ALL ".join(f"SELECT * FROM {name}" for name in table_names)
        # Bug fix: Postgres rejects an unaliased subquery in FROM
        # ("subquery in FROM must have an alias"), so alias it.
        sql_all = f"""
        SELECT privacy_model_name, source_ip, destination_ip, privacy_type, format, level, data_size, time
            FROM ({union_query}
            ) AS month_data
        """
        self.cur.execute(sql_all)
        self.conn.commit()
        rows = self.cur.fetchall()
        self.sql_close()
        return pd.DataFrame(rows, columns=['privacy_model_name', 'source_ip', 'destination_ip', 'privacy_type', 'format', 'level', 'data_size', 'time'])

    def save_data2txt(self, max_num=30):
        """Write the month's rows to 30days_data.txt as CSV.

        Args:
            max_num: when truthy, cap identical rows at this many occurrences.

        Bug fixes vs the original: iterating the DataFrame directly yielded
        column names rather than rows; the capped branch referenced an
        undefined loop index `i`; and values were written without str().
        """
        data = self.get_30days_data()
        with open('30days_data.txt', 'w') as file:
            file.write('privacy_model_name,source_ip,destination_ip,privacy_type,format,level,data_size,time' + '\n')
            seen = {}
            for row in data.itertuples(index=False):
                if max_num:
                    key = str(row)
                    if seen.get(key, 0) >= max_num:
                        continue
                    seen[key] = seen.get(key, 0) + 1
                file.write(','.join(str(word) for word in row) + '\n')
        return True

def complete_training(type_):
    """Mark training for *type_* as finished: clear the is_training flag and
    record the completion timestamp in abnormal_privacy_ai_model."""
    finished_at = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    logging.error(finished_at)
    db_helper = Get_30days_data()
    db_helper.sql_connect()
    update_sql = """
        UPDATE abnormal_privacy_ai_model SET is_training = 0,final_model_time = %s WHERE model_type = %s
        """
    db_helper.cur.execute(update_sql, (finished_at, type_))
    db_helper.conn.commit()
    db_helper.sql_close()


@celery.task()
def start_train(type_):
    """Celery entry point: train the anomaly model for *type_*.

    Pulls the last month of data, preprocesses it for the requested model
    type, trains and persists the classifier, and always clears the training
    flag before returning the saved model path (None when nothing trained).
    """
    saved_path = None
    source = Get_30days_data()
    month_data = source.get_30days_data()
    if not month_data.empty:
        filter_map = {
            'all':{'filter_col':None, 'filter':None}, 
            'level':{'filter_col':'level', 'filter':['high','critical']}, 
            'identity':{'filter_col':'privacy_model_name', 'filter':['身份信息']},
            'property':{'filter_col':'privacy_model_name', 'filter':['财产信息']}, 
            'account':{'filter_col':'privacy_model_name', 'filter':['账户信息']},
            'credit':{'filter_col':'privacy_model_name', 'filter':['信用信息']}, 
            'financial':{'filter_col':'privacy_model_name', 'filter':['金融交易']},
            'authentication':{'filter_col':'privacy_model_name', 'filter':['鉴别信息']},
            'derived':{'filter_col':'privacy_model_name', 'filter':['衍生信息']},
            'reputational':{'filter_col':'privacy_model_name', 'filter':['声誉风险']},
            'encrypted':{'filter_col':'privacy_model_name', 'filter':['加密数据']},
        }
        if type_ == 'data_size':
            features = handle_data_size(month_data)
        else:
            selection = filter_map[type_]
            features = handle_data(month_data, selection['filter_col'], selection['filter'])
        if not features.empty:
            saved_path = train_model(features, type_)
    complete_training(type_)
    return saved_path

def get_filters(form):
    """Translate request form fields into SQLAlchemy filter clauses.

    Recognised fields: destination_ip (substring match), type (JSON list),
    handle_status (int), start_time + end_time (both required for a range),
    source_ips (JSON list; every IP must be contained in the row's column).
    """
    clauses = []
    dest_ip = form.get('destination_ip')
    if dest_ip:
        clauses.append(AbnormalPrivacyDataModel.destination_ip.like(f"%{dest_ip}%"))
    raw_types = form.get('type')
    if raw_types:
        clauses.append(AbnormalPrivacyDataModel.type.in_(json.loads(raw_types)))
    status = form.get('handle_status')
    if status:
        clauses.append(AbnormalPrivacyDataModel.handle_status == int(status))
    start_time = form.get('start_time')
    end_time = form.get('end_time')
    if start_time and end_time:
        clauses.append(AbnormalPrivacyDataModel.time.between(start_time, end_time))
    raw_source_ips = form.get('source_ips')
    if raw_source_ips:
        conditions = [AbnormalPrivacyDataModel.source_ips.contains(ip) for ip in json.loads(raw_source_ips)]
        clauses.append(and_(*conditions))
    return clauses

# Human-readable (Chinese) card titles for each trainable anomaly type.
# 'derived' is intentionally commented out — get_summarize also strips it
# from its output list, so the two must stay in sync.
TYPE_DICT = {
        'all':'总数数量异常', 
        'level':'高危数据异常', 
        'identity':'身份信息数量异常',
        'property':'财产信息数量异常', 
        'account':'账户信息数量异常',
        'credit':'信用信息数量异常', 
        'financial':'金融交易数量异常',
        'authentication':'鉴别信息数量异常',
        # 'derived':'衍生信息数量异常',
        'reputational':'声誉风险数量异常',
        'encrypted':'加密数据数量异常',
        'data_size':'数据量大小异常'
                 }

@celery.task()       
def get_summarize(form, TRAIN_TYPE_LIST):
    """Per-category abnormal-record counts for the summary cards.

    Args:
        form: request form understood by get_filters.
        TRAIN_TYPE_LIST: model type keys; 'derived' is excluded from the
            output (it has no card). The caller's list is not modified.

    Returns:
        List of {'name', 'count', 'img'} dicts, one per remaining type.
    """
    filters = get_filters(form)
    # Bug fix: list.remove('derived') raised ValueError whenever 'derived'
    # was absent; filtering also replaces the explicit .copy().
    filter_list = [t for t in TRAIN_TYPE_LIST if t != 'derived']
    db = DataConn()
    res = dict(db.session.query(AbnormalPrivacyDataModel.type, func.count()).filter(*filters).group_by(AbnormalPrivacyDataModel.type).all())
    summarize = [
        {'name': TYPE_DICT.get(item, '无分类'), 'count': res.get(item, 0), 'img': 'static/abnormal/' + item + '.png'}
        for item in filter_list
    ]
    db.session.remove()
    return summarize

@celery.task()       
def get_analysis(form, type):
    """Paginated breakdown of abnormal records grouped by one model column.

    Args:
        form: request form; supports the get_filters fields plus 'page'/'limit'.
        type: attribute name of AbnormalPrivacyDataModel to group by; the
            JSON-list column 'source_ips' is counted element-wise. (Name
            shadows the builtin but is kept for caller compatibility.)

    Returns:
        {'data': [...], 'pagination': {...}} — an empty page when *type* is
        not a model column.
    """
    filters = get_filters(form)
    db = DataConn()
    limit = int(form.get('limit', 10))
    page = int(form.get('page', 1))
    # Bug fix: paged_data was only bound inside `if model:`, so an unknown
    # `type` raised UnboundLocalError at the return; default to an empty page.
    paged_data = {'data': [], 'pagination': {"limit": limit, "page": page, "total": 0}}
    res = []
    model = getattr(AbnormalPrivacyDataModel, type, None)
    if model:
        if type == 'source_ips':
            # source_ips holds a JSON-encoded list per row; count elements.
            lis = []
            for row in db.session.query(model).filter(*filters).all():
                lis.extend(json.loads(row[0]))
            data = Counter(lis)
        else:
            data = dict(db.session.query(model, func.count()).filter(*filters).group_by(model).order_by(func.count().desc()).all())
        sum_ = sum(data.values())
        for key, value in data.items():
            res.append({'type': key, 'count': value, 'percent': str(round((value / sum_)*100, 2)) + '%'})
        res = sorted(res, key=lambda x: (x['count'], x['type']), reverse=True)
        start_index = (page - 1) * limit
        end_index = start_index + limit
        pagination = {"limit": limit, "page": page, "total": len(res)}
        paged_data = {'data': res[start_index:end_index], 'pagination': pagination}
    db.session.remove()
    return paged_data

@celery.task()       
def get_top5(form, type):
    """Top-5 grouped percentages for one AbnormalPrivacyDataModel column.

    For 'source_ips' the JSON-list column of (at most) the first five rows is
    flattened and counted element-wise; otherwise the top five groups by row
    count are taken. Each entry carries its share of the top-5 total as a
    percentage, sorted descending by (count, type).
    """
    filters = get_filters(form)
    db = DataConn()
    results = []
    column = getattr(AbnormalPrivacyDataModel, type, None)
    if column:
        if type == 'source_ips':
            flattened = []
            for record in db.session.query(column).filter(*filters).limit(5).all():
                flattened.extend(json.loads(record[0]))
            counts = Counter(flattened)
        else:
            counts = dict(
                db.session.query(column, func.count())
                .filter(*filters)
                .group_by(column)
                .order_by(func.count().desc())
                .limit(5)
                .all()
            )
        total = sum(counts.values())
        for name, num in counts.items():
            results.append({'type': name, 'count': round((num / total)*100, 2)})
        results.sort(key=lambda item: (item['count'], item['type']), reverse=True)
    db.session.remove()
    return results

if __name__ == "__main__":
    # Ad-hoc manual test driver. NOTE(review): everything after the first
    # exit() below is unreachable dead code, kept for debugging convenience.

    # css = Get_30days_data()
    # origin_data = css.get_30days_data()
    # print(origin_data)
    # exit()

    print(start_train('all'))
    exit()

    # origin_data = pd.read_csv('./30days_data.txt', sep=',') # last month's data
    # df = handle_data_size(origin_data)
    # df = handle_data(origin_data,filter_col='level',filter=['high','critical'])
    # df = handle_data(origin_data,filter_col='privacy_model_name',filter=['身份信息'])
    # df = handle_data(origin_data)
    # print(df)
    # print('数据预处理完成!')  # "preprocessing done"
    # exit()
    # Unreachable: train on a saved preprocessed file, then sanity-check prediction.
    df = pd.read_csv('glbx_backend/api/v2/abnormal_check/hour_source_data.txt', sep=',')
    train_model(df,'all')
    test_data = [[172,16,2,90,0,0,0,90]]
    print(test_data)
    res = model_predict(test_data,'all')
    print(res)
