import torch
import numpy as np
import os,cv2,time,torch,random,pytorchvideo,warnings,argparse,math
warnings.filterwarnings("ignore",category=UserWarning)

from pytorchvideo.transforms.functional import (
    uniform_temporal_subsample,
    short_side_scale_with_boxes,
    clip_boxes_to_image,)
from torchvision.transforms._functional_video import normalize
from pytorchvideo.data.ava import AvaLabeledVideoFramePaths
from pytorchvideo.models.hub import slowfast_r50_detection
from deep_sort.deep_sort import DeepSort

# Additional imports for the behavior recorder below
import json
from collections import defaultdict
from datetime import datetime

"""行为检测数据记录器"""
class BehaviorRecorder:

    def __init__(self, task_id):
        self.task_id = task_id
        self.data = {
            "task_id": task_id,
            "start_time": datetime.now().isoformat(),
            "total_frames": 0,
            "fps": 0,
            "object_counter": defaultdict(int),
            "behavior_stats": defaultdict(int),
            "abnormal_events": [],
            "timeline": [],
            "frames": []
        }
        self.current_abnormal = None
        self.next_timeline_marker = 0.0  # 初始化第一个时间标记为5秒


    def record_frame(self, frame_num, objects):
        """记录单帧数据（修复触发条件）"""
        # 计算当前时间（秒）
        current_time = frame_num / self.data["fps"] if self.data["fps"] > 0 else 0

        # ================= 新增逻辑 =================
        # 当超过时间标记时记录时间线（支持短视频）
        while current_time >= self.next_timeline_marker:
            self.data["timeline"].append({
                "time": self.next_timeline_marker,
                "behaviors": list([o["behavior"] for o in objects]),
                # "behaviors": list(set([o["behavior"] for o in objects])),
                "abnormal": any(o["behavior"] in ["run"] for o in objects)
            })
            self.next_timeline_marker += 1.0  # 下一个1秒标记

        frame_data = {
            "frame": frame_num,
            "time": frame_num / self.data["fps"],
            "objects": [],
            "abnormal": False
        }

        for obj in objects:
            # 记录对象信息
            obj_entry = {
                "id": int(obj["id"]),
                "type": obj["class"],
                "bbox": [float(x) for x in obj["bbox"]],
                "behavior": obj["behavior"],
                "confidence": float(obj["confidence"])
            }
            frame_data["objects"].append(obj_entry)

            # 统计信息
            self.data["object_counter"][obj["class"]] += 1
            self.data["behavior_stats"][obj["behavior"]] += 1

            # 异常检测逻辑
            # if obj["behavior"] in ["fighting", "falling"]:
            if obj["behavior"] in ["run"]:
                frame_data["abnormal"] = True
                self._handle_abnormal_event(
                    behavior=obj["behavior"],
                    frame=frame_num,
                    obj_id=obj["id"]
                )

        self.data["frames"].append(frame_data)

        # 每5秒记录时间线
        # if frame_num % (5 * self.data["fps"]) == 0:
        #     self.data["timeline"].append({
        #         "time": frame_data["time"],
        #         "behaviors": list(set([o["behavior"] for o in frame_data["objects"]])),
        #         "abnormal": frame_data["abnormal"]
        #     })

    def _handle_abnormal_event(self, behavior, frame, obj_id):
        """处理异常事件连续性"""
        if self.current_abnormal and self.current_abnormal["type"] == behavior:
            self.current_abnormal["end_frame"] = frame
            self.current_abnormal["duration"] = (
                    (frame - self.current_abnormal["start_frame"]) / self.data["fps"])
        else:
            if self.current_abnormal:  # 保存上一个事件
                self.data["abnormal_events"].append(self.current_abnormal)
            self.current_abnormal = {
                "type": behavior,
                "start_frame": frame,
                "end_frame": frame,
                "objects": [obj_id],
                "duration": 0
            }
        self.current_abnormal["objects"].append(obj_id)

    def save(self, output_dir):
        """保存分析结果（新增最终时间点检查）"""
        # ================= 新增逻辑 =================
        # 强制记录最后一个时间点
        if self.data["frames"]:
            last_frame = self.data["frames"][-1]
            last_time = last_frame["time"]

            # 如果视频结束时未达到下一个标记，但超过上一个标记的50%
            if (last_time > self.next_timeline_marker - 2.5) and (last_time < self.next_timeline_marker):
                self.data["timeline"].append({
                    "time": last_time,
                    "behaviors": list(set([o["behavior"] for o in last_frame["objects"]])),
                    "abnormal": last_frame["abnormal"]
                })

        if self.current_abnormal:
            self.data["abnormal_events"].append(self.current_abnormal)

        self.data.update({
            "end_time": datetime.now().isoformat(),
            "processing_time": (datetime.now() - datetime.fromisoformat(self.data["start_time"])).total_seconds(),
            "object_counter": dict(self.data["object_counter"]),
            "behavior_stats": dict(self.data["behavior_stats"])
        })

        output_path = os.path.join(output_dir, f"analysis_{self.task_id}.json")
        with open(output_path, "w") as f:
            json.dump(self.data, f, indent=2)


class MyVideoCapture:
    """cv2.VideoCapture wrapper that buffers frames into clip tensors.

    Frames returned by read() are stacked until get_video_clip() converts
    the buffer into a single video tensor for SlowFast inference.
    """

    def __init__(self, source, progress_callback=None):
        """
        Args:
            source: path / URL / camera index accepted by cv2.VideoCapture.
            progress_callback: optional callable(frame_idx, total_frames)
                invoked after every successful read().
        """
        self.cap = cv2.VideoCapture(source)
        self.progress_callback = progress_callback
        # Hoisted out of read(): the frame count is invariant for file
        # sources, so query it once instead of once per frame.
        # NOTE(review): camera sources typically report 0/-1 here — confirm
        # callbacks handle that.
        self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.idx = -1      # index of the most recently read frame
        self.end = False   # set once the stream is exhausted
        self.stack = []    # buffered BGR frames for the current clip

    def read(self):
        """Read one frame; buffer it and fire the progress callback.

        Returns:
            (ret, img) exactly as cv2.VideoCapture.read() does.
        """
        self.idx += 1
        ret, img = self.cap.read()
        if ret:
            self.stack.append(img)
            if self.progress_callback:
                self.progress_callback(self.idx, self.total_frames)
        else:
            self.end = True
        return ret, img

    def to_tensor(self, img):
        """Convert one BGR frame to an RGB uint8 tensor of shape (1, H, W, 3)."""
        img = torch.from_numpy(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        return img.unsqueeze(0)

    def get_video_clip(self):
        """Drain the frame buffer into a (C, T, H, W) clip tensor."""
        assert len(self.stack) > 0, "clip length must large than 0 !"
        frames = [self.to_tensor(img) for img in self.stack]
        self.stack = []  # reset the buffer for the next clip
        # (T, H, W, C) -> (C, T, H, W)
        return torch.cat(frames).permute(-1, 0, 1, 2)

    def release(self):
        """Release the underlying cv2.VideoCapture."""
        self.cap.release()
        
def tensor_to_numpy(tensor):
    """Convert a (C, H, W) torch tensor to a (H, W, C) numpy array on the CPU."""
    return tensor.cpu().numpy().transpose((1, 2, 0))

def ava_inference_transform(
    clip, 
    boxes,
    num_frames = 32, #if using slowfast_r50_detection, change this to 32, 4 for slow 
    crop_size = 640, 
    data_mean = [0.45, 0.45, 0.45], 
    data_std = [0.225, 0.225, 0.225],
    slow_fast_alpha = 4, #if using slowfast_r50_detection, change this to 4, None for slow
):
    """Preprocess a video clip and person boxes for SlowFast/AVA inference.

    Args:
        clip: uint8 video tensor, indexed as (C, T, H, W) here (height and
            width are read from dims 2 and 3; dim 1 is treated as temporal
            by the pathway split below).
        boxes: per-person boxes as (x1, y1, x2, y2) in pixel coordinates.
        num_frames: temporal subsample length (32 for slowfast_r50_detection,
            4 for a slow-only model).
        crop_size: target short-side size in pixels after rescaling.
        data_mean: per-channel normalization mean.
        data_std: per-channel normalization std.
        slow_fast_alpha: slow-pathway temporal stride; None skips the
            two-pathway split (slow-only models).

    Returns:
        (clip, boxes, roi_boxes): the transformed clip (a [slow, fast] list
        of tensors when slow_fast_alpha is set), the rescaled/clipped boxes
        as a tensor, and an untouched copy of the input boxes.
    """
    boxes = np.array(boxes)
    roi_boxes = boxes.copy()  # keep the original-scale boxes for the caller
    # Subsample frames uniformly, then scale pixel values to [0, 1].
    clip = uniform_temporal_subsample(clip, num_frames)
    clip = clip.float()
    clip = clip / 255.0
    height, width = clip.shape[2], clip.shape[3]
    boxes = clip_boxes_to_image(boxes, height, width)
    # Resize so the short side equals crop_size, scaling boxes in lockstep.
    clip, boxes = short_side_scale_with_boxes(clip,size=crop_size,boxes=boxes,)
    clip = normalize(clip,
                    np.array(data_mean, dtype=np.float32),
                    np.array(data_std, dtype=np.float32),)
    # Re-clip boxes against the resized frame dimensions.
    boxes = clip_boxes_to_image(boxes, clip.shape[2],  clip.shape[3])
    if slow_fast_alpha is not None:
        # Fast pathway keeps all frames; slow pathway takes every
        # slow_fast_alpha-th frame along the temporal dim.
        fast_pathway = clip
        slow_pathway = torch.index_select(clip,1,
            torch.linspace(0, clip.shape[1] - 1, clip.shape[1] // slow_fast_alpha).long())
        clip = [slow_pathway, fast_pathway]
    
    return clip, torch.from_numpy(boxes), roi_boxes

def plot_one_box(x, img, color=[100,100,100], text_info="None",
                 velocity=None, thickness=1, fontsize=0.5, fontthickness=1):
    """Draw one bounding box with a filled label background onto img (in place)."""
    top_left = (int(x[0]), int(x[1]))
    bottom_right = (int(x[2]), int(x[3]))
    cv2.rectangle(img, top_left, bottom_right, color, thickness, lineType=cv2.LINE_AA)
    # Size the filled label background to the rendered text.
    label_size = cv2.getTextSize(text_info, cv2.FONT_HERSHEY_TRIPLEX,
                                 fontsize, fontthickness + 2)[0]
    label_corner = (top_left[0] + int(label_size[0]),
                    top_left[1] + int(label_size[1] * 1.45))
    cv2.rectangle(img, top_left, label_corner, color, -1)
    cv2.putText(img, text_info, (top_left[0], top_left[1] + label_size[1] + 2),
                cv2.FONT_HERSHEY_TRIPLEX, fontsize, [255, 255, 255], fontthickness)
    return img

def deepsort_update(Tracker, pred, xywh, np_img):
    """Feed one frame of detections to the DeepSort tracker.

    Returns the tracker's output rows for the frame.
    """
    confidences = pred[:, 4:5]
    class_ids = pred[:, 5].tolist()
    rgb_frame = cv2.cvtColor(np_img, cv2.COLOR_BGR2RGB)
    return Tracker.update(xywh, confidences, class_ids, rgb_frame)

def save_yolopreds_tovideo(yolo_preds, id_to_ava_labels, color_map, output_video, vis=False):
    """Render tracked detections (with AVA action labels) and write each frame.

    Args:
        yolo_preds: YOLO result object; .ims are frames, .pred rows are
            [x1, y1, x2, y2, cls, track_id, vx, vy] after DeepSort.
        id_to_ava_labels: track id -> AVA action label string.
        color_map: per-class BGR color list.
        output_video: opened cv2.VideoWriter.
        vis: also show each frame in a "demo" window.
    """
    for im, pred in zip(yolo_preds.ims, yolo_preds.pred):
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        if pred.shape[0]:
            for *box, cls, trackid, vx, vy in pred:
                if int(cls) != 0:
                    ava_label = ''  # action labels only apply to persons (class 0)
                elif trackid in id_to_ava_labels:
                    ava_label = id_to_ava_labels[trackid].split(' ')[0]
                else:
                    ava_label = 'Unknown'  # fixed typo: was 'Unknow'
                text = '{} {} {}'.format(int(trackid), yolo_preds.names[int(cls)], ava_label)
                im = plot_one_box(box, im, color_map[int(cls)], text)
        im = im.astype(np.uint8)
        # NOTE(review): the frame is written after BGR->RGB conversion while
        # cv2.VideoWriter expects BGR — saved colors may be channel-swapped;
        # confirm against the channel order of yolo_preds.ims.
        output_video.write(im)
        if vis:
            cv2.imshow("demo", cv2.cvtColor(im, cv2.COLOR_RGB2BGR))
            # Fix: without a waitKey pump, imshow never actually renders.
            cv2.waitKey(1)

# 转h264存储
import subprocess
def convert_video(input_path, output_path):
    """Re-encode a video to web-friendly H.264 baseline MP4 via ffmpeg.

    Raises subprocess.CalledProcessError if ffmpeg exits non-zero.
    """
    cmd = [
        'ffmpeg',
        '-y',                       # overwrite output without prompting
        '-i', input_path,
        '-c:v', 'libx264',          # H.264 video codec
        '-profile:v', 'baseline',   # maximum player compatibility
        '-pix_fmt', 'yuv420p',
        '-movflags', '+faststart',  # moov atom up front for streaming
        output_path,
    ]
    subprocess.run(cmd, check=True)


def main(config):
    """Run the full pipeline on one video: YOLOv5 detection, DeepSort
    tracking, SlowFast action recognition; writes an annotated video plus a
    JSON behavior-analysis report.

    Reads from `config`: input, output, imsize, conf, iou, device, classes,
    show, task_id, output_dir, and optionally progress_callback.
    NOTE(review): task_id / output_dir are consumed here but not declared by
    the argparse setup at the bottom of this file — confirm callers supply
    them.

    Raises:
        RuntimeError: wrapping any exception raised during processing.
    """
    try:
        device = config.device
        imsize = config.imsize

        """加载yolo模型"""
        # Load the YOLOv5 detector from a local clone of the repo.
        # model = torch.hub.load('ultralytics/yolov5', 'yolov5l6').to(device)
        model = torch.hub.load('./yolov5', 'yolov5l6', source='local')
        # NOTE(review): the model is never moved to `device` here — confirm
        # whether inference is intended to run on the hub default device.
        # model_path = "/home/sjwusj/Work/yolo_slowfast/yolov5l6.pt"
        # model = torch.load(model_path, map_location=device)
        # model = model.to(device).eval()
        model.conf = config.conf
        model.iou = config.iou
        model.max_det = 100
        if config.classes:
            model.classes = config.classes

        """加载slowfast模型"""
        # Pretrained SlowFast R50 detection model for AVA action labels.
        video_model = slowfast_r50_detection(True).eval().to(device)
        # video_model_path = "/home/sjwusj/Work/yolo_slowfast/checkpoints/SLOWFAST_8x8_R50_DETECTION.pyth"
        # video_model = torch.load(video_model_path, map_location=device)
        # video_model = video_model.to(device).eval()

        deepsort_tracker = DeepSort("deep_sort/deep_sort/deep/checkpoint/ckpt.t7")
        ava_labelnames,_ = AvaLabeledVideoFramePaths.read_label_map("selfutils/temp.pbtxt")
        # One random color per COCO class for box drawing.
        coco_color_map = [[random.randint(0, 255) for _ in range(3)] for _ in range(80)]

        vide_save_path = config.output
        # Probe the input once just for the frame dimensions.
        video=cv2.VideoCapture(config.input)
        width,height = int(video.get(3)),int(video.get(4))
        video.release()

        # Video writer configuration (output assumed to be 25 fps).
        outputvideo = cv2.VideoWriter(
            vide_save_path,
            # Alternative H.264 fourcc codes that were tried:
            # cv2.VideoWriter.fourcc(*'avc1') ,
            # cv2.VideoWriter.fourcc(*'h264'),
            # cv2.VideoWriter_fourcc(*'mp4v'),
            cv2.VideoWriter.fourcc(*'mp4v'),
            # cv2.VideoWriter_fourcc('M', 'P', '4', 'V'),
            25,
            (width,height)
        )

        print("processing...")

        # Optional per-frame progress callback supplied by the caller.
        progress_callback = getattr(config, 'progress_callback', None)
        cap = MyVideoCapture(config.input, progress_callback=progress_callback)

        # cap = MyVideoCapture(config.input)
        id_to_ava_labels = {}  # track id -> latest AVA action label
        a=time.time()

        # Total frame count for the completion summary printed below.
        total_frames = int(cap.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        processed_frames = 0

        # Behavior recorder that produces the JSON analysis report.
        recorder = BehaviorRecorder(config.task_id)  # task_id must come from config

        # Probe the input again for the fps / frame count the recorder needs.
        video = cv2.VideoCapture(config.input)
        recorder.data["fps"] = int(video.get(cv2.CAP_PROP_FPS))
        recorder.data["total_frames"] = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        video.release()

        while not cap.end:
            ret, img = cap.read()
            if not ret:
                cap.end = True  # force the end flag and stop
                break
            yolo_preds = model([img], size=imsize)
            yolo_preds.files=["img.jpg"]

            # DeepSort tracking: replace raw detections with tracked boxes.
            deepsort_outputs=[]
            for j in range(len(yolo_preds.pred)):
                temp = deepsort_update(
                    deepsort_tracker,
                    yolo_preds.pred[j].cpu(),
                    yolo_preds.xywh[j][:,0:4].cpu(),
                    yolo_preds.ims[j]
                )
                if len(temp)==0:
                    temp=np.ones((0,8))
                deepsort_outputs.append(temp.astype(np.float32))
            # Overwrite predictions with tracker output; rows appear to be
            # [x1, y1, x2, y2, cls, track_id, vx, vy] (see sample below).
            yolo_preds.pred=deepsort_outputs

            # Collect per-object records for the behavior recorder.
            frame_objects = []
            for j in range(len(yolo_preds.pred)):
                # Explicit index-based unpacking of each tracker row.
                for pred in yolo_preds.pred[j]:
                    # sample row: [659  447  768  717  0  27  -35  5]
                    try:
                        # Extract the row fields.
                        x_min = float(pred[0])
                        y_min = float(pred[1])
                        x_max = float(pred[2])
                        y_max = float(pred[3])
                        class_id = int(pred[4])  # 5th field: class id
                        track_id = int(pred[5])  # 6th field: track id

                        # Corner coordinates -> width/height.
                        # NOTE(review): these shadow the video-level
                        # width/height above; harmless since those are only
                        # used before this loop, but worth renaming.
                        width = x_max - x_min
                        height = y_max - y_min

                        # Build the object record for the recorder.
                        obj_data = {
                            "id": track_id,
                            "class": yolo_preds.names[class_id],  # class name
                            "bbox": [x_min, y_min, width, height],  # x, y, w, h
                            "behavior": id_to_ava_labels.get(track_id, "unknown"),
                            "confidence": 1.0  # DeepSort returns no score; use a fixed value
                        }
                        frame_objects.append(obj_data)

                    except (IndexError, ValueError) as e:
                        print(f"数据解析失败: {str(e)} | 数据: {pred.tolist()}")
                        continue
                # for *box, conf, cls, trackid, vx, vy in yolo_preds.pred[j]:
                #     obj_data = {
                #         "id": int(trackid),
                #         "class": yolo_preds.names[int(cls)],
                #         "bbox": [float(x) for x in box],
                #         "behavior": id_to_ava_labels.get(int(trackid), "unknown"),
                #         "confidence": float(conf)
                #     }
                #     frame_objects.append(obj_data)
            # Record this frame's objects.
            recorder.record_frame(cap.idx, frame_objects)

            # Run SlowFast once per buffered 25-frame (1 s) clip.
            if len(cap.stack) == 25:
                print(f"processing {cap.idx // 25}th second clips")
                clip = cap.get_video_clip()
                if yolo_preds.pred[0].shape[0]:
                    inputs, inp_boxes, _=ava_inference_transform(clip, yolo_preds.pred[0][:,0:4], crop_size=imsize)
                    # Prepend a zero batch-index column to each ROI box.
                    inp_boxes = torch.cat([torch.zeros(inp_boxes.shape[0],1), inp_boxes], dim=1)
                    if isinstance(inputs, list):
                        inputs = [inp.unsqueeze(0).to(device) for inp in inputs]
                    else:
                        inputs = inputs.unsqueeze(0).to(device)
                    with torch.no_grad():
                        slowfaster_preds = video_model(inputs, inp_boxes.to(device))
                        slowfaster_preds = slowfaster_preds.cpu()
                    # Map each track id to its top-scoring AVA label
                    # (+1 because AVA label ids are 1-based).
                    for tid,avalabel in zip(yolo_preds.pred[0][:,5].tolist(), np.argmax(slowfaster_preds, axis=1).tolist()):
                        id_to_ava_labels[tid] = ava_labelnames[avalabel+1]

            save_yolopreds_tovideo(yolo_preds, id_to_ava_labels, coco_color_map, outputvideo, config.show)
            processed_frames += 1

        # Persist the analysis report once processing is done.
        recorder.save(config.output_dir)  # output_dir must come from config

        # Completion summary.
        print(f"✅ 视频处理完成，共处理 {processed_frames}/{total_frames} 帧")
        print("total cost: {:.3f} s, video length: {} s".format(time.time()-a, cap.idx / 25))
        print('saved video to:', vide_save_path)

    except Exception as e:
        print(f"❌ 处理异常: {str(e)}")
        raise RuntimeError(str(e)) from e  # re-wrap so callers see one exception type
    finally:
        # Always release capture/writer resources, even on failure.
        if 'cap' in locals() and cap.cap.isOpened():
            cap.release()
        if 'outputvideo' in locals() and outputvideo.isOpened():
            outputvideo.release()
        # cv2.destroyAllWindows()
        # torch.cuda.empty_cache()
if __name__=="__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default="input.mp4", help='test imgs folder or video or camera')
    parser.add_argument('--output', type=str, default="output.mp4", help='folder to save result imgs, can not use input folder')
    # object detect config
    parser.add_argument('--imsize', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf', type=float, default=0.4, help='object confidence threshold')
    parser.add_argument('--iou', type=float, default=0.4, help='IOU threshold for NMS')
    parser.add_argument('--device', default='cuda', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--show', action='store_true', help='show img')
    config = parser.parse_args()

    if config.input.isdigit():
        print("using local camera.")
        config.input = int(config.input)

    print(config)

    main(config)
