import os.path
import traceback
from datetime import timedelta, datetime

import pandas as pd
import requests
from xbase_util.add_column_util import handle_dns
from xbase_util.common_util import check_path, split_data_by_chunk
from xbase_util.dangerous_util import get_splunk_pa, get_splunk_waf

from global_ver import base_config, es_req, current_dir
from src.bean.ExportCsvBean import ExportCsvBean

def save_to_file(session_list, output, common_logger):
    """Write the labelled session DataFrame to CSV.

    Copies *session_list* (the caller's frame is left untouched), overwrites
    `isDangerous` with the locally computed `paIsDangerous`, and drops the
    internal working columns before writing.

    Args:
        session_list: pandas DataFrame of labelled sessions.
        output: path or writable buffer accepted by ``DataFrame.to_csv``.
        common_logger: logger used for progress messages.
    """
    common_logger.info("[导出csv数据]保存导出csv的文件数据")
    frame = session_list.copy()
    frame['isDangerous'] = frame['paIsDangerous']
    # Internal/working columns that must not appear in the exported file.
    hidden = {'pcap_flow_text', 'predict_res', 'esId', 'paIsDangerous'}
    exported = [name for name in frame.columns if name not in hidden]
    frame.to_csv(output, columns=exported, index=False)


def set_label(session_list, start_time, end_time, output, sceneType, common_logger):
    """Label sessions as dangerous using Splunk PA/WAF threat records, then export CSV.

    For ``sceneType != "normal"`` the upstream `isDangerous` flag is trusted as-is.
    Otherwise PA (ids) and WAF threat records are pulled from Splunk and matched
    against sessions on (source ip, source port, destination ip, destination port)
    within +/- ``label_duration`` minutes of the threat time; matched sessions get
    the threat's metadata and ``paIsDangerous = True``.

    Args:
        session_list: DataFrame of session records (mutated in place, also returned
            reduced to the id/flag columns).
        start_time / end_time: datetimes bounding the Splunk query window.
        output: CSV destination handed to ``save_to_file``.
        sceneType: scenario name; only "normal" triggers Splunk matching.
        common_logger: logger for progress messages.

    Returns:
        Tuple ``(frame, columns)`` where *frame* holds only
        ``esId``/``isDangerous``/``paIsDangerous`` and *columns* is its column
        list. On any error an empty frame with those columns and ``[]`` is
        returned (previously a bare ``[]`` was returned, which crashed callers
        that unpack two values).
    """
    try:
        session_list = session_list.fillna('')
        # Normalize the four join keys to strings so they compare reliably
        # against the (string) Splunk values.
        for col in ('source.ip', 'source.port', 'destination.ip', 'destination.port'):
            session_list[col] = session_list[col].astype(str)
        session_list['firstPacket'] = session_list['firstPacket'].astype(int)
        session_list['lastPacket'] = session_list['lastPacket'].astype(int)
        # Epoch-ms timestamps -> tz-aware Shanghai datetimes for window matching.
        session_list['firstPacket_datetime'] = pd.to_datetime(session_list['firstPacket'], unit='ms').dt.tz_localize(
            'UTC').dt.tz_convert('Asia/Shanghai')
        session_list['lastPacket_datetime'] = pd.to_datetime(session_list['lastPacket'], unit='ms').dt.tz_localize(
            'UTC').dt.tz_convert('Asia/Shanghai')
        session_list['paIsDangerous'] = False
        session_list['PROTOCOL'] = ''
        session_list['DENY_METHOD'] = ''
        session_list['THREAT_SUMMARY'] = ''
        session_list['SEVERITY'] = ''

        if sceneType != "normal":
            common_logger.info("[导出csv数据]不是normal，打标签结果，isDangerous使用接口给的，而paIsDangerous为")
            session_list.drop(columns=['firstPacket_datetime', 'lastPacket_datetime'], inplace=True)
            session_list['paIsDangerous'] = session_list['isDangerous']
        else:
            common_logger.info(f"[导出csv数据]开始获取异常数据库")
            duration = base_config['label_duration']
            splunk_cfg = base_config['splunk']
            threat_pa = pd.DataFrame(get_splunk_pa(start_time=start_time, end_time=end_time,
                                                   splunk_host=splunk_cfg['host'],
                                                   splunk_port=splunk_cfg['port'],
                                                   splunk_scheme=splunk_cfg['scheme'],
                                                   splunk_username=splunk_cfg['username'],
                                                   splunk_password=splunk_cfg['splunk_password'],
                                                   splunk_filter=splunk_cfg['splunk_filter'],
                                                   ))
            threat_pa['type'] = 'ids'
            threat_waf = pd.DataFrame(get_splunk_waf(start_time=start_time, end_time=end_time,
                                                     splunk_host=splunk_cfg['host'],
                                                     splunk_port=splunk_cfg['port'],
                                                     splunk_scheme=splunk_cfg['scheme'],
                                                     splunk_username=splunk_cfg['username'],
                                                     splunk_password=splunk_cfg['splunk_password']
                                                     ))
            threat_waf['type'] = 'waf'
            common_logger.info(f"[导出csv数据]异常数量waf:{len(threat_waf)}|pa:{len(threat_pa)}")
            dangerous_all = pd.concat([threat_pa, threat_waf])
            if len(dangerous_all) == 0:
                common_logger.info("[导出csv数据]异常数量为空，打标签结束")
                session_list.drop(columns=['firstPacket_datetime', 'lastPacket_datetime'], inplace=True)
            else:
                # Normalize the threat records the same way as the sessions.
                dangerous_all.fillna('', inplace=True)
                for col in ('SIP', 'S_PORT', 'DIP', 'D_PORT'):
                    dangerous_all[col] = dangerous_all[col].astype(str)
                dangerous_all['THREAT_TIME'] = pd.to_datetime(dangerous_all['THREAT_TIME'],
                                                              format='%Y-%m-%d %H:%M:%S')

                ids_sip = session_list['source.ip']
                # For WAF records the client ip is the first X-Forwarded-For
                # entry. Computed lazily (the column may be absent when there
                # are no WAF rows) and cached so the apply() runs once instead
                # of once per threat row.
                waf_sip = None
                count = 0
                for ind, (i, d_item) in enumerate(dangerous_all.iterrows()):
                    common_logger.info(f"[导出csv数据]进度：{ind + 1}/{len(dangerous_all)}")
                    if d_item['type'] == 'ids':
                        sip = ids_sip
                    else:
                        if waf_sip is None:
                            waf_sip = session_list['http.xffIp'].apply(
                                lambda x: x[0] if isinstance(x, list) and len(x) > 0 else "")
                        sip = waf_sip
                    threat_time = d_item['THREAT_TIME'].tz_localize('Asia/Shanghai')
                    threat_start_time = threat_time - timedelta(minutes=duration)
                    threat_end_time = threat_time + timedelta(minutes=duration)
                    result_session = session_list[(
                            (sip == d_item["SIP"]) &
                            (session_list['source.port'] == d_item["S_PORT"]) &
                            (session_list['destination.ip'] == d_item["DIP"]) &
                            (session_list['destination.port'] == d_item["D_PORT"]) &
                            (session_list['firstPacket_datetime'] >= threat_start_time) &
                            (session_list['lastPacket_datetime'] <= threat_end_time)
                    )]
                    count = count + len(result_session)
                    if len(result_session) != 0:
                        common_logger.info(
                            f'[导出csv数据]匹配到：{len(result_session)},共匹配到：{count}')
                        # BUG FIX: values are now aligned with the column list.
                        # Previously THREAT_SUMMARY received d_item['type'] and
                        # traffic_type received d_item['THREAT_SUMMARY'].
                        session_list.loc[session_list['id'].isin(result_session['id']), [
                            'DENY_METHOD', 'PROTOCOL', 'SEVERITY', 'THREAT_SUMMARY', 'traffic_type',
                            'paIsDangerous']] = [
                            d_item['DENY_METHOD'],
                            d_item['PROTOCOL'],
                            d_item['SEVERITY'],
                            d_item['THREAT_SUMMARY'],
                            d_item['type'],
                            True
                        ]
                session_list.drop(columns=['firstPacket_datetime', 'lastPacket_datetime'], inplace=True,
                                  errors='ignore')
        save_to_file(session_list, output, common_logger)
        session_list = session_list[["esId", "isDangerous", "paIsDangerous"]]
        common_logger.info("[导出csv数据]导出csv完毕")
        return session_list, session_list.columns.tolist()
    except Exception as e:
        traceback.print_exc()
        common_logger.info(f"[导出csv数据]报错:{e}")
        # Keep the two-element shape so callers unpacking (frame, columns)
        # do not crash with a secondary ValueError.
        return pd.DataFrame(columns=["esId", "isDangerous", "paIsDangerous"]), []


def getOriginIsDangerous(esId, origin_data):
    """Return the `isDangerous` value of the first record matching *esId*.

    Falls back to "1" when no record in *origin_data* carries that esId.
    """
    matches = (record['isDangerous'] for record in origin_data if record['esId'] == esId)
    return next(matches, "1")


def adjust_time_to_five_minutes(start_time, end_time):
    """Return an ordered (start, end) pair spanning at least five minutes.

    Arguments given in either order are sorted first. Windows shorter than
    five minutes are widened symmetrically around their midpoint; longer
    windows are returned unchanged.
    """
    lo, hi = sorted((start_time, end_time))
    if hi - lo >= timedelta(minutes=5):
        return lo, hi
    midpoint = lo + (hi - lo) / 2  # center of the original window
    half_window = timedelta(minutes=2.5)
    return midpoint - half_window, midpoint + half_window


def set_label_task(bean: ExportCsvBean, common_logger):
    """Fetch predicted sessions from ES, label/export them, and POST results back.

    Pulls the documents for ``bean.data``'s esIds from the ``x_predict`` index in
    batches of 500, attaches the caller-supplied `isDangerous` flag, derives a
    (>= 5 minute) time window from the packet timestamps, labels and exports via
    ``set_label``, then POSTs the labelled records to ``bean.callBackUrl`` in
    chunks of 1000.

    Args:
        bean: request payload carrying requestCode, callBackUrl, sceneType and
            the list of {esId, isDangerous} records.
        common_logger: logger for progress messages.
    """
    requestCode = bean.requestCode
    callBackUrl = bean.callBackUrl
    ids = list(set([item['esId'] for item in bean.data]))
    batch_size = 500
    origin_hits = []
    for i in range(0, len(ids), batch_size):
        common_logger.info(f"[导出csv数据]获取数据：{i + 1}/{len(ids)}")
        batch_ids = ids[i:i + batch_size]
        res = es_req.search(body={
            "size": batch_size,
            "query": {
                "ids": {
                    "values": batch_ids
                }
            }
        }, index="x_predict")
        if res.status_code == 200:
            origin_hits.extend(res.json()['hits']['hits'])
    common_logger.info(f"[导出csv数据]获取结束，共:{len(origin_hits)}")
    if len(origin_hits) == 0:
        common_logger.info("[导出csv数据]没有数据")
        return
    es_data = []
    for hits in origin_hits:
        record = hits['_source']  # renamed from `map` to avoid shadowing the builtin
        record['esId'] = hits['_id']
        record["isDangerous"] = getOriginIsDangerous(esId=record['esId'], origin_data=bean.data)
        es_data.append(record)
    # Bound the Splunk query window by the earliest first packet and the
    # latest last packet (epoch ms -> seconds), widened to >= 5 minutes.
    startTime = datetime.fromtimestamp(min(es_data, key=lambda x: x["firstPacket"])["firstPacket"] / 1000.0)
    endTime = datetime.fromtimestamp(max(es_data, key=lambda x: x["lastPacket"])["lastPacket"] / 1000.0)
    startTime, endTime = adjust_time_to_five_minutes(startTime, endTime)
    df = handle_dns(pd.DataFrame(es_data), isDataFrame=True)
    path = os.path.join(current_dir, "out", f"{requestCode}_{datetime.now().strftime('%Y%m%d%H%M%S')}.csv")
    label_res, columns = set_label(df, startTime, endTime, check_path(path), bean.sceneType, common_logger)
    chunks = split_data_by_chunk(label_res.to_dict(orient='records'), 1000)
    for index, chunk in enumerate(chunks):
        common_logger.info(f"[导出csv数据]回掉进度：{index + 1}/{len(chunks)}|回调:{len(chunk)}条|{callBackUrl}")
        res = requests.post(callBackUrl, json={
            "requestCode": requestCode,
            # os.path.basename instead of split("/") so Windows paths built by
            # os.path.join are handled too.
            "fileName": os.path.basename(path),
            "sourceFilePath": path,
            "fileSize": os.path.getsize(path),
            "dataNumber": len(label_res),
            "fileHeaderConfig": columns,
            # BUG FIX: previously posted ALL of label_res on every iteration,
            # defeating the chunking; send only this chunk's records.
            "data": chunk
        })
        common_logger.info(f"[导出csv数据]保存结果{res.text}")
