# -*- coding: utf-8 -*-
'''
@Time    : 22-3-10 上午10:47
@Author  : zhang
@File    : msg.py
'''

import datetime
import json
import os
import sys
sys.path.append('/usr/src/glbx_backend/')
sys.path.append('/html/medical_backend/glbx_backend/')
sys.path.append('/home/tutu/公司/medical_backend/glbx_backend/')
from typing import Tuple
import chinese_calendar
import joblib
from configs.config import Configs
from apsche_task import BaseModel
import pandas as pd
from model.db_sql import DBSql

# Silence pandas' SettingWithCopyWarning globally: the aggregation helpers
# below assign derived columns on filtered frames on purpose.
pd.options.mode.chained_assignment = None
class Abnormal_check(BaseModel):
    """Hourly anomaly detection over the daily ``privacy_data<YYYYMMDD>`` tables.

    Pulls the previous full hour of privacy records, aggregates them per
    destination IP into model feature rows, runs the pre-trained joblib model
    for each category, and records predicted anomalies in
    ``abnormal_privacy_data``.
    """

    def __init__(self):
        # NOTE(review): BaseModel.__init__ is never invoked here; assumed to be
        # a no-op that only supplies create_db_conn() -- confirm before changing.
        self.db_conn = self.create_db_conn()
        self.db = DBSql()

    def get_tables(self, date_list: tuple) -> tuple:
        """Return the names from *date_list* that actually exist in the database.

        :param date_list: candidate ``privacy_data<YYYYMMDD>`` table names.
        :return: the (possibly empty) subset of *date_list* found in
            ``information_schema.tables``.
        """
        sql_1 = """
            SELECT table_name
            FROM information_schema.tables
            WHERE table_name LIKE 'privacy_data%'
        """
        with self.db.query_pd():
            rows = pd.read_sql_query(sql_1, con=self.db.conn)['table_name'].tolist()
        # Always return a tuple (the original returned [] on one path and a
        # tuple on the other); callers only test truthiness, so this is safe.
        return tuple(row for row in rows if row in date_list)

    def get_one_hour_data(self) -> Tuple[pd.DataFrame, datetime.datetime, datetime.datetime]:
        """Fetch the previous full hour of privacy records.

        :return: ``(df, last_hour, now_hour)`` where *df* holds the rows whose
            ``time`` falls within ``[last_hour, now_hour]``; *df* is empty when
            no daily table exists yet.
        """
        now = datetime.datetime.now()
        yesterday = now - datetime.timedelta(days=1)
        # The one-hour window may span midnight, so both today's and
        # yesterday's daily tables are candidates.
        today_table = 'privacy_data' + now.strftime("%Y%m%d")
        yesterday_table = 'privacy_data' + yesterday.strftime("%Y%m%d")
        now_hour = now.replace(minute=0, second=0, microsecond=0)
        last_hour = now_hour - datetime.timedelta(hours=1)
        table_names = self.get_tables((today_table, yesterday_table))
        if not table_names:
            return pd.DataFrame(), last_hour, now_hour
        # join() replaces the fragile manual "sql_query[:-11]" suffix strip.
        sql_query = ' UNION ALL '.join(
            f"SELECT * FROM {table_name}" for table_name in table_names
        )
        # "AS merged" gives the derived table the alias most SQL engines require.
        sql1 = f"""
        SELECT id, privacy_model_name, source_ip, destination_ip, privacy_type, format, level, data_size, time
            FROM ({sql_query}
            ) AS merged
        WHERE time <= '{now_hour}' AND time >= '{last_hour}'
        """
        with self.db.query_pd():
            df = pd.read_sql_query(sql1, con=self.db.conn)
        return df, last_hour, now_hour

    def handle_data(self, origin_data: pd.DataFrame, filter_col=None, filter=None) -> pd.DataFrame:
        """Aggregate per-IP hourly record counts into model feature rows.

        :param origin_data: raw rows; must contain ``destination_ip`` and
            ``time`` (plus *filter_col* when a filter is requested).
        :param filter_col: optional column to pre-filter on.
        :param filter: list of accepted values for *filter_col* (the name
            shadows the builtin but is kept for backward compatibility).
        :return: frame with columns ip1..ip4, hour_range, is_holiday,
            is_disease, count; empty when no rows survive the filter.
        """
        if filter and filter_col:
            origin_data = origin_data[origin_data[filter_col].isin(filter)]
        if origin_data.empty:
            return pd.DataFrame()
        # Work on a copy so the caller's frame is not mutated in place
        # (check_it1 reuses the same frame for every category).
        origin_data = origin_data.copy()
        origin_data['time'] = pd.to_datetime(origin_data['time'])
        origin_data['date'] = origin_data['time'].dt.date
        origin_data['is_holiday'] = origin_data['date'].apply(
            lambda x: 1 if chinese_calendar.is_holiday(x) else 0)
        origin_data['hour_range'] = origin_data['time'].dt.hour
        # Disease flag is currently a constant model input.
        origin_data['is_disease'] = 0
        hour_source_data = (
            origin_data
            .groupby(['destination_ip', 'date', 'hour_range', 'is_holiday', 'is_disease'])
            .size()
            .reset_index(name='count')
            .sort_values(['destination_ip', 'hour_range'])
        )
        final_df = hour_source_data.drop('date', axis=1).reset_index(drop=True)
        # Split the dotted IPv4 address into four separate model features.
        final_df[['ip1', 'ip2', 'ip3', 'ip4']] = final_df['destination_ip'].str.split('.', expand=True)
        final_df = final_df.drop('destination_ip', axis=1)
        # Fixed feature-column order expected by the joblib models.
        return final_df[['ip1', 'ip2', 'ip3', 'ip4', 'hour_range', 'is_holiday', 'is_disease', 'count']]

    def handle_data_size(self, origin_data: pd.DataFrame) -> pd.DataFrame:
        """Aggregate per-IP hourly transferred volume (in KB) into feature rows.

        :param origin_data: raw rows with ``destination_ip``, ``time`` and
            ``data_size`` (bytes) columns.
        :return: frame with columns ip1..ip4, hour_range, is_holiday,
            is_disease, data_size; empty when *origin_data* is empty.
        """
        if origin_data.empty:
            # Guard added for consistency with handle_data.
            return pd.DataFrame()
        # Copy to avoid mutating the caller's shared frame in place.
        origin_data = origin_data.copy()
        origin_data['time'] = pd.to_datetime(origin_data['time'])
        origin_data['date'] = origin_data['time'].dt.date
        origin_data['is_holiday'] = origin_data['date'].apply(
            lambda x: 1 if chinese_calendar.is_holiday(x) else 0)
        origin_data['hour_range'] = origin_data['time'].dt.hour
        origin_data['is_disease'] = 0
        hour_source_data = (
            origin_data
            .groupby(['destination_ip', 'date', 'hour_range', 'is_holiday', 'is_disease'])
            .agg({'data_size': 'sum'})  # total bytes per group
            .reset_index()
            .sort_values(['destination_ip', 'hour_range'])
        )
        hour_source_data = hour_source_data.drop('date', axis=1)
        # Convert bytes to KB, rounded to two decimals.
        hour_source_data['data_size'] = round(hour_source_data['data_size'] / 1024, 2)
        final_df = hour_source_data.reset_index(drop=True)
        final_df[['ip1', 'ip2', 'ip3', 'ip4']] = final_df['destination_ip'].str.split('.', expand=True)
        final_df = final_df.drop('destination_ip', axis=1)
        # Fixed feature-column order expected by the joblib models.
        return final_df[['ip1', 'ip2', 'ip3', 'ip4', 'hour_range', 'is_holiday', 'is_disease', 'data_size']]

    def model_predict(self, data, type_):
        """Run the pre-trained model for *type_* on one feature row.

        :param data: a list containing one feature row.
        :param type_: category name; selects ``<type_>_model.joblib``.
        :return: the model's first prediction, or 0 ("normal") when no model
            file has been deployed for this category.
        """
        model_filename = Configs().AI_STATIC_PATH + f'/{type_}_model.joblib'
        if not os.path.exists(model_filename):
            return 0
        model = joblib.load(model_filename)
        return model.predict(data)[0]

    def insert_abnormal(self, hour_start, hour_end, destination_ip, source_ips, type, description, privacy_ids, count):
        """Persist one detected anomaly into ``abnormal_privacy_data``.

        ``source_ips`` and ``privacy_ids`` are JSON-encoded before insertion.
        (Parameter ``type`` shadows the builtin; kept for compatibility.)
        """
        sql = """
            INSERT INTO abnormal_privacy_data 
            (hour_start, hour_end, destination_ip, source_ips, type, description, privacy_ids, count) 
            VALUES 
            (%s, %s, %s, %s, %s, %s, %s, %s)
        """
        with self.db.insert(sql, (hour_start, hour_end, destination_ip, json.dumps(source_ips), type, description, json.dumps(privacy_ids), count)):
            pass

    def check_it1(self, df: pd.DataFrame, hour_start: datetime.datetime, hour_end: datetime.datetime):
        """Run every anomaly category over the hour's rows and store hits.

        :param df: the raw rows for the hour (non-empty, see run()).
        :param hour_start: inclusive start of the checked window.
        :param hour_end: inclusive end of the checked window.
        """
        type_dict = {
            'all': {'filter_col': None, 'filter': None},
            'level': {'filter_col': 'level', 'filter': ['high', 'critical']},
            'data_size': {},
            'identity': {'filter_col': 'privacy_model_name', 'filter': ['身份信息']},
            # BUG FIX: the original key was 'property ' (trailing space), so the
            # model lookup targeted 'property _model.joblib' and the category
            # could never flag anything; the stored type also carried the space.
            'property': {'filter_col': 'privacy_model_name', 'filter': ['财产信息']},
            'account': {'filter_col': 'privacy_model_name', 'filter': ['账户信息']},
            'credit': {'filter_col': 'privacy_model_name', 'filter': ['信用信息']},
            'financial': {'filter_col': 'privacy_model_name', 'filter': ['金融交易']},
            'authentication': {'filter_col': 'privacy_model_name', 'filter': ['鉴别信息']},
            'derived': {'filter_col': 'privacy_model_name', 'filter': ['衍生信息']},
            'reputational': {'filter_col': 'privacy_model_name', 'filter': ['声誉风险']},
            'encrypted': {'filter_col': 'privacy_model_name', 'filter': ['加密数据']},
        }
        for type_, spec in type_dict.items():
            if type_ == 'data_size':
                filter = None
                filter_col = None
                df2 = self.handle_data_size(df)
            else:
                filter_col = spec['filter_col']
                filter = spec['filter']
                df2 = self.handle_data(df, filter_col, filter)
            if df2.empty:
                continue
            for test_data in df2.values.tolist():
                pre = self.model_predict([test_data], type_)
                if not int(pre):
                    continue
                # Reassemble the dotted IP from the four feature columns.
                ip = '.'.join(test_data[:4])
                if filter_col:
                    df_ip = df[(df['destination_ip'] == ip) & df[filter_col].isin(filter)]
                else:
                    df_ip = df[df['destination_ip'] == ip]
                privacy_ids = df_ip['id'].tolist()
                # Last feature column: record count, or summed KB for data_size.
                abnormal_count = test_data[-1]
                source_ips = list(set(df_ip['source_ip'].tolist()))
                description = f"时段[{hour_start.strftime('%Y-%m-%d %H:%M')}-{hour_end.strftime('%H:%M')}]异常数据[{abnormal_count}条]"
                if type_ == 'data_size':
                    description = f"时段[{hour_start.strftime('%Y-%m-%d %H:%M')}-{hour_end.strftime('%H:%M')}]总文件大小异常[{abnormal_count}KB]"
                self.insert_abnormal(hour_start, hour_end, ip, source_ips, type_, description, privacy_ids, abnormal_count)

    def check_it2(self, df: pd.DataFrame, hour_start: datetime.datetime, hour_end: datetime.datetime):
        """Placeholder for a second detection strategy; not implemented yet."""
        pass

    def run(self):
        """Entry point: fetch the last hour's rows and run the checks."""
        df, hour_start, hour_end = self.get_one_hour_data()
        if df.empty:
            return
        self.check_it1(df, hour_start, hour_end)

    def __del__(self):
        # Best-effort cleanup of the scoped session; swallow (but print)
        # failures since destructors must not raise.
        try:
            self.db_conn.session.remove()
        except Exception as e:
            print(e)



def run_abnormal_check():
    """Build the anomaly checker and execute one hourly scan."""
    checker = Abnormal_check()
    checker.run()


if __name__ == '__main__':
    # Direct invocation is deliberately disabled; the job is expected to run
    # via the scheduler (apsche_task). Uncomment for a one-off manual check.
    # run_abnormal_check()
    pass

