from flask import Blueprint, request, current_app
from applications.extensions.init_websocket import publish_msg
from applications.common.http import success_api, fail_api
import numpy as np
import io
import datetime
import os
import random
import json
# zlib is used to compress sensor payloads before publishing them over the websocket
import zlib
import threading
import time
from flask import Blueprint

import shutil
from flask import send_file
import requests

# Blueprint for the index/replay API, mounted under /index.
bp = Blueprint('index', __name__, url_prefix='/index')

# Module-level state managing IMU data replay: the currently running replay
# thread, the event used to ask it to stop, and a lock serializing
# stop-old/start-new transitions.
imu_replay_thread = None
imu_replay_stop_event = threading.Event()
imu_replay_lock = threading.Lock()


@bp.get('/')
def index():
    """Health-check endpoint that also emits a demo ``task_progress`` event."""
    demo_event = {
        "event": "task_progress",
        "data": {
            "task_id": "1",
            "task_name": "hello world!"
        }
    }
    publish_msg(demo_event)
    return "hello world!"


@bp.get('/getVideo')
def get_video():
    """Return playback URLs for the live video stream.

    The FLV URL is built from the "trackHost" value stored in Redis,
    falling back to a default host when the key is absent.
    """
    host = current_app.redis_client.get("trackHost")
    if host is None:
        host = "192.168.3.50"
    urls = {
        "webrtc": "/zl/index/api/webrtc?app=live&stream=test&type=play",
        "mp4": "/zl/live/test.live.mp4",
        "flv": f"http://{host}:8080/flv_stream",
        "hls": "/zl/live/test/hls.m3u8",
    }
    return success_api(msg="成功", data=urls)


def replay_imu_data(batch_id, replay_count):
    """Start a background thread that replays recorded IMU data.

    Any IMU replay already in progress is stopped first.

    :param batch_id: optional batch id; when given, data is read from
        ``cache_data/<batch_id>/imu_data``, otherwise from ``imu_data``.
    :param replay_count: number of full passes over the data file.
    :return: a success/fail API response describing the outcome.
    """
    try:
        # Resolve the data directory from the optional batch id.
        if batch_id:
            imu_data_dir = f"cache_data/{batch_id}/imu_data"
        else:
            imu_data_dir = "imu_data"

        # The data directory must exist.
        if not os.path.exists(imu_data_dir):
            if batch_id:
                return fail_api(msg=f"批次 {batch_id} 的IMU数据文件夹不存在")
            else:
                return fail_api(msg="imu_data 文件夹不存在")

        # The directory must contain the recorded data file.
        imu_data_file = os.path.join(imu_data_dir, "imu_data.txt")
        if not os.path.exists(imu_data_file):
            if batch_id:
                return fail_api(msg=f"批次 {batch_id} 的IMU数据文件夹下不存在 imu_data.txt 文件")
            else:
                return fail_api(msg="imu_data 文件夹下不存在 imu_data.txt 文件")

        # Serialize stop-old/start-new so concurrent requests cannot race.
        with imu_replay_lock:
            # Stop any replay that is still running.
            stop_current_imu_replay()

            # Fresh stop event so the new worker is unaffected by the stop
            # signal sent to the previous one.
            global imu_replay_stop_event
            imu_replay_stop_event = threading.Event()

            global imu_replay_thread
            imu_replay_thread = threading.Thread(
                target=imu_replay_worker,
                args=(imu_data_file, imu_replay_stop_event, imu_data_dir, replay_count)
            )
            imu_replay_thread.daemon = True
            imu_replay_thread.start()

        # Bug fix: the success message used to always interpolate batch_id,
        # reporting "批次 None" when no batch id was supplied. Branch the
        # message like replay_lidar_data does.
        if batch_id:
            return success_api(msg=f"开始重放批次 {batch_id} 的IMU数据文件")
        else:
            return success_api(msg="开始重放IMU数据文件")

    except Exception as e:
        return fail_api(msg=f"重放IMU数据失败: {str(e)}")


# IMU data replay worker thread.
# Streams the file line by line so large recordings need not be loaded
# into memory at once.
# Expected single-line format: current_time|data
def imu_replay_worker(imu_data_file, stop_event, data_dir, replay_count):
    """Replay the recorded IMU data file, preserving the original timing.

    Each line is "<timestamp>|<payload>" with timestamp format
    %Y%m%d%H%M%S%f. The gap between consecutive timestamps is reproduced
    by waiting on ``stop_event``, so the replay is interruptible at any
    point. Each payload is zlib-compressed and published on the "imu"
    websocket event. The whole file is replayed ``replay_count`` times.

    :param imu_data_file: path to the imu_data.txt recording.
    :param stop_event: threading.Event used to request an early stop.
    :param data_dir: directory the file lives in (not used in this body).
    :param replay_count: number of full passes over the file.
    """
    try:
        while replay_count > 0:
            # Open the file and stream it line by line.
            try:
                with open(imu_data_file, 'r') as f:
                    prev_data_point = None  # previous valid data point, used for pacing

                    while True:
                        # Bail out promptly if a stop was requested.
                        if stop_event.is_set():
                            print("IMU数据重放已停止")
                            break

                        # Read one line of data.
                        line = f.readline()

                        # readline() returns '' at end of file.
                        if not line:
                            print(f"文件 {imu_data_file} 重放完成，已到达文件末尾")
                            break

                        # Normalize the current line.
                        line = line.strip()
                        # Skip blank lines (they do not end the replay).
                        if not line:
                            continue

                        try:
                            # Split "timestamp|payload" on the first '|'.
                            timestamp_str, data = line.split('|', 1)
                            # Timestamp format: %Y%m%d%H%M%S%f (sub-second precision).
                            timestamp = datetime.datetime.strptime(
                                timestamp_str, "%Y%m%d%H%M%S%f"
                            )
                            current_data_point = (timestamp, data)

                        except ValueError as e:
                            print(f"解析行数据失败 '{line}': {str(e)}")
                            continue

                        # Reproduce the original spacing between samples.
                        if prev_data_point:
                            prev_timestamp, _ = prev_data_point
                            interval = (timestamp - prev_timestamp).total_seconds()

                            # Wait on the stop event so the sleep is interruptible.
                            stop_event.wait(interval)
                            # A stop may have been requested during the wait.
                            if stop_event.is_set():
                                print("IMU数据重放已停止")
                                break

                        # Publish the current data point (zlib-compressed payload).
                        try:
                            publish_msg({
                                "event": "imu",
                                "data": zlib.compress(data.encode('utf-8'))
                            })
                            print(
                                f"已发送数据点 (时间戳: {timestamp.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]})" if prev_data_point else \
                                    f"已发送首个数据点 (时间戳: {timestamp.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]})"
                                )
                        except Exception as e:
                            print(f"发送数据失败: {str(e)}")

                        # Remember this point for the next interval computation.
                        prev_data_point = current_data_point

                # One full pass over the file is done.
                replay_count -= 1
                if replay_count > 0:
                    print(f"准备开始第 {replay_count} 次重放")
                else:
                    print("所有重放轮次已完成")

            except Exception as e:
                print(f"读取文件 {imu_data_file} 失败: {str(e)}")
                return

            # Re-open the file for the next pass if more rounds remain and
            # no stop was requested.
            if replay_count > 0 and not stop_event.is_set():
                continue
            else:
                break
    except Exception as e:
        print(f"IMU数据重放线程发生错误: {str(e)}")


# Signal the active IMU replay thread (if any) to stop, then wait briefly
# for it to finish.
def stop_current_imu_replay():
    global imu_replay_stop_event, imu_replay_thread

    # Ask the worker to stop.
    if imu_replay_stop_event:
        imu_replay_stop_event.set()

    # Give the thread up to one second to wind down.
    worker = imu_replay_thread
    if worker is not None and worker.is_alive():
        worker.join(timeout=1.0)


# Global state for lidar data replay, mirroring the IMU replay globals
# defined near the top of the file.
#
# Bug fix: this section used to ALSO re-initialize the three IMU replay
# globals that are already defined at module top. That silently replaced
# the Event and Lock objects, which could orphan a replay thread holding
# references to the old ones; the duplicate assignments were removed.
sensor_replay_thread = None
sensor_replay_stop_event = threading.Event()
sensor_replay_lock = threading.Lock()


def replay_lidar_data(batch_id, replay_count):
    """Start a background thread replaying recorded lidar .npy frames.

    Any lidar replay already in progress is stopped first. Returns a
    success/fail API response describing the outcome.
    """
    try:
        # Pick the data directory based on the optional batch id.
        lidar_data_dir = f"cache_data/{batch_id}/lidar_data" if batch_id else "lidar_data"

        # The data directory must exist.
        if not os.path.exists(lidar_data_dir):
            if batch_id:
                return fail_api(msg=f"批次 {batch_id} 的激光雷达数据文件夹不存在")
            return fail_api(msg="lidar_data 文件夹不存在")

        # Collect every recorded .npy frame file.
        npy_files = [name for name in os.listdir(lidar_data_dir) if name.endswith('.npy')]
        if not npy_files:
            if batch_id:
                return fail_api(msg=f"批次 {batch_id} 的激光雷达数据文件夹下没有 .npy 文件")
            return fail_api(msg="lidar_data 文件夹下没有 .npy 文件")

        # Serialize stop-old/start-new so concurrent requests cannot race.
        with sensor_replay_lock:
            stop_current_sensor_replay()

            # Fresh stop event so the new worker is unaffected by the stop
            # signal sent to the previous one.
            global sensor_replay_stop_event
            sensor_replay_stop_event = threading.Event()

            global sensor_replay_thread
            sensor_replay_thread = threading.Thread(
                target=lidar_replay_worker,
                args=(npy_files, sensor_replay_stop_event, lidar_data_dir, replay_count)
            )
            sensor_replay_thread.daemon = True
            sensor_replay_thread.start()

        if batch_id:
            return success_api(msg=f"开始重放批次 {batch_id} 的 {len(npy_files)} 个激光雷达数据文件")
        return success_api(msg=f"开始重放 {len(npy_files)} 个激光雷达数据文件")
    except Exception as e:
        return fail_api(msg=f"重放激光雷达数据失败: {str(e)}")


# Lidar data replay worker thread.
def lidar_replay_worker(npy_files, stop_event, data_dir, replay_count):
    """Replay recorded lidar frames, preserving their original pacing.

    Frames are ``.npy`` files under ``data_dir``. They are ordered by file
    modification time, and the gap between consecutive mtimes is reproduced
    by waiting on ``stop_event`` (so the replay is interruptible). Each
    frame is loaded with numpy, JSON-encoded, zlib-compressed and published
    on the "lidar" websocket event. The sequence is replayed
    ``replay_count`` times.

    Fixes over the previous version:
      * a requested stop now ends the outer replay loop instead of spinning
        through every remaining round;
      * the redundant nested try/except around the file read was flattened;
      * ``prev_mtime`` is reset per round, so the first frame of a new round
        is sent immediately rather than after a (negative, no-op) wait.
    """
    try:
        # Map each file to its modification time.
        file_info = []
        for file_name in npy_files:
            file_path = os.path.join(data_dir, file_name)
            try:
                mtime = os.path.getmtime(file_path)
                mtime_dt = datetime.datetime.fromtimestamp(mtime)
                file_info.append((file_path, file_name, mtime_dt))
            except Exception as e:
                print(f"获取文件 {file_name} 信息失败: {str(e)}")
                continue

        # No usable files: nothing to replay.
        if not file_info:
            print("没有成功获取任何激光雷达文件的信息")
            return

        # Replay in recording order (by modification time).
        file_info.sort(key=lambda x: x[2])

        while replay_count > 0 and not stop_event.is_set():
            prev_mtime = None
            for file_path, file_name, current_mtime in file_info:
                # Bail out promptly if a stop was requested.
                if stop_event.is_set():
                    print("激光雷达数据重放已停止")
                    break

                # Reproduce the original spacing between frames.
                if prev_mtime:
                    interval = (current_mtime - prev_mtime).total_seconds()
                    # Wait on the stop event so the sleep is interruptible.
                    stop_event.wait(interval)
                    if stop_event.is_set():
                        print("激光雷达数据重放已停止")
                        break

                # Load, JSON-encode, compress and publish the frame.
                try:
                    lidar_array = np.load(file_path)
                    # tolist() makes the array JSON-serializable.
                    lidar_list = lidar_array.tolist()
                    json_data = json.dumps(lidar_list)
                    compressed_data = zlib.compress(json_data.encode('utf-8'))
                    publish_msg({
                        "event": "lidar",
                        "data": compressed_data
                    })
                    print(f"已发送文件: {file_name} (修改时间: {current_mtime.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]})")
                except Exception as e:
                    print(f"读取或处理文件 {file_name} 失败: {str(e)}")

                # Remember this frame's mtime for the next interval.
                prev_mtime = current_mtime

            replay_count -= 1
    except Exception as e:
        print(f"激光雷达数据重放线程发生错误: {str(e)}")


# Signal the active lidar replay thread (if any) to stop, then wait briefly
# for it to finish.
def stop_current_sensor_replay():
    global sensor_replay_stop_event, sensor_replay_thread

    # Ask the worker to stop.
    if sensor_replay_stop_event:
        sensor_replay_stop_event.set()

    # Give the thread up to one second to wind down.
    worker = sensor_replay_thread
    if worker is not None and worker.is_alive():
        worker.join(timeout=1.0)


@bp.get("/stop_replay")
def stop_replay():
    # 停止当前IMU数据重放
    stop_current_imu_replay()
    # 停止当前激光雷达数据重放
    stop_current_sensor_replay()
    return success_api(msg=f"重放已停止")


@bp.get('/replay_batch_data')
def replay_batch_data():
    """Replay all recorded data (IMU + lidar) of a given batch.

    Query params: ``batch_id`` (required) and ``replay_count``
    (optional, defaults to 100).
    """
    try:
        batch_id = request.args.get('batch_id')
        raw_count = request.args.get('replay_count')
        replay_count = int(raw_count) if raw_count else 100

        # Stop whatever is currently replaying before starting over.
        stop_current_imu_replay()
        stop_current_sensor_replay()

        if not batch_id:
            return fail_api(msg="没有传入 batch_id ")

        # NOTE(review): the return values of the two replay_* calls are
        # ignored, so a per-sensor failure is still reported as overall
        # success — confirm this is intended.
        replay_imu_data(batch_id, replay_count)
        replay_lidar_data(batch_id, replay_count)
        return success_api(msg=f"已重新播放批次 {batch_id} 的所有数据")
    except Exception as e:
        return fail_api(msg=f"重新播放数据失败: {str(e)}")


@bp.get('/remove_batch')
def remove_batch():
    """Delete a batch's entire data cache, stopping any running replay first."""
    try:
        batch_id = request.args.get('batch_id')

        # Make sure nothing is still reading the files about to be deleted.
        stop_current_imu_replay()
        stop_current_sensor_replay()

        if not batch_id:
            return fail_api(msg="没有传入 batch_id ")

        # Remove the batch's whole cache directory, including all files in it.
        cache_data_dir = f"cache_data/{batch_id}"
        if os.path.exists(cache_data_dir):
            shutil.rmtree(cache_data_dir)

        return success_api(msg=f"已清除批次 {batch_id} 的所有数据缓存")
    except Exception as e:
        return fail_api(msg=f"清除数据缓存失败: {str(e)}")


@bp.get("/start_batch")
def start_batch():

    return success_api(msg="已开始新的批次记录")


@bp.get("/stop_batch")
def stop_batch():

    return success_api(msg="已停止当前批次记录")


@bp.get("/get_batch_switch")
def get_batch_switch():
    current_batch_id = current_app.redis_client.get("batch_id")
    batch_switch = current_app.redis_client.get("batch_switch")
    return success_api(data={"batch_switch": batch_switch, "current_batch_id": current_batch_id})


def _get_batch_list():
    batch_list = []
    for file_name in os.listdir("cache_data"):
        if file_name.isdigit():
            batch_list.append(file_name)
    return batch_list


@bp.get("/get_batch_list")
def get_batch_list():
    current_batch_id = current_app.redis_client.get("batch_id")
    batch_switch = current_app.redis_client.get("batch_switch")
    batch_ids = _get_batch_list()
    batch_list = []
    for batch_id in batch_ids:
        status = "stop"
        if batch_id == current_batch_id and batch_switch == "1":
            status = "running"
        batch_list.append({"batch_id": batch_id, "status": status})

    return success_api(data={"batch_list": batch_list})


@bp.get("/get_current_batch_id")
def get_current_batch_id():
    current_batch_id = current_app.redis_client.get("batch_id")
    batch_switch = current_app.redis_client.get("batch_switch")
    return success_api(data={"current_batch_id": current_batch_id, "batch_switch": batch_switch})


@bp.get("/send_video")
def send_video():
    publish_msg({
        "event": "video",
        "data": {
            "flv": f"http://10.10.18.50:8080/flv_stream",
        }
    })
    return success_api()


@bp.post('/cap_picture')
def cap_picture():
    """Grab a single JPEG frame from an MJPEG stream and return it as a file.

    Expects a JSON body with a ``url`` field pointing at the MJPEG stream.
    The stream is scanned for the first complete JPEG (SOI 0xFFD8 ..
    EOI 0xFFD9), which is returned as an attachment named ``capture.jpg``.

    Fixes over the previous version: the streaming HTTP response is now
    always closed (it used to leak the connection), and the multipart
    boundary parsing was removed because its result was never used.
    """
    try:
        mjpeg_url = request.json.get('url')
        if not mjpeg_url:
            return fail_api(msg="未提供 MJPEG 流地址")

        # Stream the MJPEG response and stop as soon as one full JPEG
        # frame has been seen.
        resp = requests.get(mjpeg_url, stream=True, timeout=5)
        jpeg_data = None
        try:
            buf = b''
            for chunk in resp.iter_content(chunk_size=1024):
                buf += chunk
                start = buf.find(b'\xff\xd8')  # JPEG start-of-image marker
                end = buf.find(b'\xff\xd9')    # JPEG end-of-image marker
                if start != -1 and end != -1 and end > start:
                    jpeg_data = buf[start:end + 2]
                    break
        finally:
            # Always release the streaming connection.
            resp.close()

        if jpeg_data is None:
            return fail_api(msg="未能从 MJPEG 流中获取图片")

        return send_file(
            io.BytesIO(jpeg_data),
            mimetype='image/jpeg',
            as_attachment=True,
            download_name='capture.jpg'
        )
    except Exception as e:
        return fail_api(msg=f"图片截取失败: {str(e)}")
