import json
import os
import time
import traceback
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime

import requests
from xbase_util.common_util import check_path, split_data_by_chunk

from global_ver import save_logger, error_logger, current_dir
from src.util.common_util import format_timestamp


def save_pcap(stream, id):
    """Write one session's raw capture bytes to <current_dir>/pcap/<id>.pcap.

    Args:
        stream: raw pcap payload (bytes).
        id: session identifier used as the file stem.
    """
    target = check_path(os.path.join(current_dir, "pcap", f"{id}.pcap"))
    with open(target, 'wb') as fh:
        fh.write(stream)


def save_to_es_thread(item_data, timeout=30):
    """Bulk-index one chunk of documents into the `x_predict` ES index.

    Args:
        item_data: list of dicts; each must carry an 'esId' used as the ES _id.
        timeout: seconds to wait for the ES HTTP response (prevents a hung
            request from pinning a thread-pool worker forever).

    Returns:
        The per-document 'items' list from the bulk response on HTTP 200,
        otherwise None.
    """
    # Build the _bulk NDJSON payload: an action line followed by its document.
    data = []
    for doc in item_data:
        data.append({"index": {"_index": "x_predict", "_id": doc['esId']}})
        data.append(doc)
    try:
        # The _bulk API requires newline-delimited JSON ending with a newline.
        bulk_data = "\n".join(json.dumps(item) for item in data) + "\n"
        response = requests.post("http://10.28.1.140:9200/_bulk", data=bulk_data,
                                 headers={"Content-Type": "application/x-ndjson"},
                                 timeout=timeout)
        if response.status_code == 200:
            items = response.json()['items']
            # HTTP 200 only means the bulk call itself worked; each item
            # can still fail individually, so check per-item statuses.
            for item in items:
                if f"{item['index']['status']}" != "201":
                    save_logger.info(f"[保存队列]写入es报错:{item}")
            save_logger.info("[保存队列]写入es成功")
            return items
        else:
            save_logger.info("[保存队列]保存队列错误")
            save_logger.info(response.status_code)
            save_logger.info(response.text)
            save_logger.info(f"[保存队列]save_to_es错误:{response.text} 要保存:{len(item_data)}条")
            return None
    except Exception as e:
        # Log the exception detail instead of discarding it.
        error_logger.info(f"[保存队列]写入es失败:{e}")
        traceback.print_exc()
        return None


def save_to_java_thread(data_list_item, timeout=30):
    """POST one chunk of prediction records to the Java push endpoint.

    Runs inside a thread pool, so any network or JSON-parsing error is
    caught and logged here instead of propagating out of `future.result()`
    and aborting the whole batch in the caller.

    Args:
        data_list_item: list of flat record dicts to push.
        timeout: seconds to wait for the HTTP response.
    """
    try:
        res = requests.post("http://10.28.6.144:8003/base-model-forecast-prd/modelForecastPrdPush",
                            json=data_list_item, timeout=timeout)
        if f"{res.json()['code']}" != "200":
            error_logger.info(f"写入java报错:{res.text}")
        else:
            save_logger.info(f"写入java数据库成功:{res.text}")
    except Exception as e:
        error_logger.info(f"写入java失败:{e}")
        traceback.print_exc()


def _build_java_record(data, scene, warn_time):
    """Flatten one arkime session dict into the record shape pushed to Java.

    Every numeric field is stringified because the downstream API expects
    string-typed values.

    Args:
        data: one session dict (arkime field names as keys).
        scene: scene-type label stamped onto the record.
        warn_time: pre-formatted batch timestamp (shared by the whole batch).
    """
    return {
        "arkimeId": data['id'],
        "esId": data['esId'],
        "sip": data['source.ip'],
        "spt": str(data['source.port']),
        "dip": data['destination.ip'],
        "dpt": str(data['destination.port']),
        "xffIp": str(data['http.xffIp']),
        "arkimeStartTime": str(format_timestamp(data['firstPacket'])),
        "arkimeEndTime": str(format_timestamp(data['lastPacket'])),
        "protocols": str(data['protocol']),
        "uri": str(data['http.uri']),
        "warnTime": warn_time,
        "sceneType": scene,
        "tcpFlagsAck": str(data['tcpflags.ack']),
        "tcpFlagsSyn": str(data['tcpflags.syn']),
        "tcpFlagsSynAck": str(data['tcpflags.syn-ack']),
        "tcpFlagsUrg": str(data['tcpflags.urg']),
        "tcpFlagsPsh": str(data['tcpflags.psh']),
        "tcpFlagsFin": str(data['tcpflags.fin']),
        "tcpFlagsRst": str(data['tcpflags.rst']),
        "sBytes": str(data['source.bytes']),
        "sPayloadBytes": str(data['client.bytes']),
        "dBytes": str(data['destination.bytes']),
        "dPayloadBytes": str(data['server.bytes']),
        "totalPayloadBytes": str(data['client.bytes'] + data['server.bytes']),
        "sPackets": str(data['source.packets']),
        "dPackets": str(data['destination.packets']),
        "totalPackets": str(data['network.packets']),
        "totalBytes": str(data['totDataBytes']),
        "dnsHost": str(data['dns.host'])
    }


def save_thread_pool(origin_data_list, stream_map, scene):
    """Persist one batch of sessions: pcaps to disk, records to ES and Java.

    Args:
        origin_data_list: session dicts (must already carry an 'esId').
        stream_map: session id -> raw pcap bytes (or None when unavailable).
        scene: scene-type label stamped onto every outgoing record.
    """
    current_time = datetime.now()
    # warnTime is identical for the whole batch, so format it once
    # (millisecond precision, zero-padded).
    warn_time = current_time.strftime("%Y-%m-%d %H:%M:%S.") + str(current_time.microsecond // 1000).zfill(3)
    data_list = [_build_java_record(data, scene, warn_time) for data in origin_data_list]
    if not data_list:
        save_logger.info(f"[保存队列][{scene}]数据为空，未保存")
        return

    # Write pcaps sequentially; skip sessions without a captured stream.
    id_stream_list = [(session['id'], stream_map[session['id']]) for session in origin_data_list
                      if stream_map.get(session['id']) is not None]
    for index, (session_id, stream) in enumerate(id_stream_list):
        save_logger.info(f"[保存队列]保存pcap：{index + 1}/{len(id_stream_list)}")
        save_pcap(id=session_id, stream=stream)

    # Fan the ES and Java uploads out over one shared thread pool.
    save_logger.info(f"[保存队列][{scene}]保存到java数据库:{len(data_list)}条")
    # Split into bulk-sized chunks before submitting.
    split_java = split_data_by_chunk(data_list, 1000)
    splited_es = split_data_by_chunk(origin_data_list, 1000)
    save_logger.info(f"[保存队列][{scene}]分批保存到ES，分割:{[len(item) for item in splited_es]}")
    save_logger.info(f"[保存队列][{scene}]分批保存到JAVA，分割:{[len(item) for item in split_java]}")
    with ThreadPoolExecutor(max_workers=20) as pool:
        future_list = [pool.submit(save_to_es_thread, item) for item in splited_es]
        future_list += [pool.submit(save_to_java_thread, item) for item in split_java]
        for index, future in enumerate(as_completed(future_list)):
            # result() re-raises any exception from the worker here.
            res = future.result()
            if res is not None:
                save_logger.info(f"[保存队列][{scene}]第{index + 1}/{len(future_list)}")
    save_logger.info(f"[保存队列][{scene}]保存完毕，ES和Java数据库处理完成")


def start_save_queue(save_queue):
    """Blocking consumer loop: pull (sessions, streams, scene) batches and save them.

    A None item on the queue is the shutdown sentinel. Each iteration is
    wrapped in its own try/except so that one failing batch is logged and
    skipped instead of terminating the worker permanently (previously the
    try enclosed the whole loop, so any error ended consumption for good).
    """
    while True:
        try:
            save_logger.info(f"[保存队列]开始等待")
            param = save_queue.get()
            if param is None:
                save_logger.info("[保存队列]队列停止")
                break
            now = datetime.now()
            origin_data_list, stream_map, scene_type = param
            save_logger.info(f"[保存队列]数据总量：{len(origin_data_list)}")
            save_logger.info(f"[保存队列]异常数量：{len(origin_data_list)}")
            if len(origin_data_list) == 0:
                save_logger.info(f"[保存队列]异常数量为空，不去保存")
                continue
            # Tag every session with a fresh ES document id before saving.
            for item in origin_data_list:
                item['esId'] = str(uuid.uuid4())
            save_thread_pool(origin_data_list, stream_map, scene_type)

            save_logger.info(f"[保存队列]保存完毕,耗时:{datetime.now() - now}")
            time.sleep(1)
        except Exception:
            save_logger.info("[保存队列]报错")
            traceback.print_exc()
