from threading import Thread 
import queue
import time 
from collections import defaultdict
import cv2 

from src.yolov8_face_detec import YoloFace 
from src.face_recognition import FaceRecog 
from src.face_database import FaceDatabase
from src.post_process import post_process

from src.post_process_all_body import post_process_allbody
from src.global_dict import gd
from ultralytics import YOLO
from collections import Counter
from pathlib import Path

class Engine:
    """Top-level coordinator: owns one VideoCapture, one StreamTask and one
    FaceTecognition thread per task_id, plus the shared face database /
    detection / recognition clients."""

    def __init__(self, logger, args):
        """Create shared model clients.

        Args:
            logger: application logger.
            args: parsed CLI/config namespace; must provide redis_port,
                triton_port, face_database_thres, face_detec_model_name
                and face_recog_model_name.
        """
        self.logger = logger
        self.args = args

        # task_id -> per-task worker threads
        self.cam_dict = {}
        self.task_dict = {}
        self.face_dict = {}

        self.face_database = FaceDatabase(args.redis_port, args.face_database_thres)
        self.yolo_face = YoloFace(args.triton_port, args.face_detec_model_name)
        self.face_recog = FaceRecog(args.triton_port, args.redis_port, args.face_database_thres, args.face_recog_model_name)
        self.logger.info('engine inited ------')

    def add_source(self, task_id, stream_url, grpc_address, decode_mode, skip_frame_rate, add_stranger, alert_time, dbase_name, engine_file_path, rule_info):
        """Register a new video source and start its three worker threads.

        Registers the task in the global dict first so the workers can look
        up their logger/rule info, then starts capture -> stream -> face
        recognition in dependency order.
        """
        gd.add_task(task_id, grpc_address, skip_frame_rate, add_stranger, alert_time, dbase_name)
        decode_mode = str(decode_mode)

        self.logger.info(f'creating video capture task {stream_url} ------')
        self.cam_dict[task_id] = VideoCapture(task_id, stream_url, mode=decode_mode)
        self.cam_dict[task_id].start()

        self.logger.info(f'creating stream task {task_id} ------')
        self.task_dict[task_id] = StreamTask(task_id, self.args, self.cam_dict, engine_file_path, rule_info)
        self.task_dict[task_id].start()

        self.logger.info(f'creating face_recognition task {task_id} ------')
        self.face_dict[task_id] = FaceTecognition(task_id, self.args, self.task_dict)
        self.face_dict[task_id].start()

    def remove_source(self, task_id):
        """Stop and unregister the workers for *task_id*.

        Safe to call with an unknown or already-removed task_id: all pops
        use a default so a double remove cannot raise KeyError (the original
        guarded only cam_dict, leaving task_dict/face_dict pops unprotected).
        """
        cam = self.cam_dict.pop(task_id, None)
        if cam is not None:
            cam.stop()

        # StreamTask/FaceTecognition expose no stop() — they exit when the
        # capture stops feeding them; here we just drop our references.
        self.task_dict.pop(task_id, None)
        self.face_dict.pop(task_id, None)
        gd.remove_task(task_id)
    
class FaceTecognition(Thread):
    """Consumer thread: reads finished-track messages from a StreamTask,
    runs face recognition on the collected face crops, and saves the best
    matching crop per track.

    (Class name 'FaceTecognition' — likely a typo of 'FaceRecognition' —
    is kept unchanged for caller compatibility.)
    """

    def __init__(self, task_id, args, task_dict):
        super(FaceTecognition, self).__init__()
        self.task_id = task_id
        self.args = args
        # StreamTask instance whose read() yields {track_id: {...}} messages
        self.track_msg = task_dict[task_id]
        self.face_recog = FaceRecog(args.triton_port, args.redis_port, args.face_database_thres, args.face_recog_model_name)
        self.logger = gd.logger_dict[task_id]

    def get_most_frequent_with_highest_confidence(self, data):
        """Pick the most frequent label in *data*, tie-broken by confidence.

        Args:
            data: list of (label, confidence) tuples.

        Returns:
            ((label, confidence), index): the highest-confidence item of the
            winning label and its index in *data*; (None, -1) if *data* is
            empty (the original raised ValueError on empty input).
        """
        if not data:
            return None, -1

        # Count occurrences and track the best confidence seen per label.
        count = defaultdict(int)
        max_confidence = {}
        for label, confidence in data:
            count[label] += 1
            if label not in max_confidence or confidence > max_confidence[label]:
                max_confidence[label] = confidence

        # Labels with the top occurrence count; break ties by confidence.
        max_count = max(count.values())
        candidates = [label for label, cnt in count.items() if cnt == max_count]
        best_label = max(candidates, key=lambda x: max_confidence[x])

        # Locate the single best-confidence entry for the winning label.
        best_in_group = None
        best_confidence = -1
        best_index = -1
        for idx, (label, confidence) in enumerate(data):
            if label == best_label and confidence > best_confidence:
                best_confidence = confidence
                best_in_group = (label, confidence)
                best_index = idx

        return best_in_group, best_index

    def filter_strangers(self, name_list, exclude_label='stranger'):
        """Split *name_list* into non-stranger entries and stranger indices.

        Returns:
            (kept, dropped_indices): entries whose label does NOT contain
            *exclude_label*, and the indices (into *name_list*) that do.
        """
        name_list_base = []
        filtered_indices = []

        for idx, item in enumerate(name_list):
            if exclude_label in item[0]:
                filtered_indices.append(idx)
            else:
                name_list_base.append(item)

        return name_list_base, filtered_indices

    def run(self):
        """Main loop: consume track-end messages until read() returns None."""
        dbase_name = gd.rule_info_dict[self.task_id].get('dbase_name', 'stream_test')
        add_stranger = gd.rule_info_dict[self.task_id].get('add_stranger', 0)
        alert_time = int(gd.rule_info_dict[self.task_id].get('alert_time', '1'))
        save_dir = Path("face_bgr") / str(self.task_id)
        save_dir.mkdir(parents=True, exist_ok=True)

        try:
            while True:
                track_msg = self.track_msg.read()

                # throttle so the recognizer isn't hammered per message
                time.sleep(1)

                if track_msg is None:
                    break

                track_id = next(iter(track_msg.keys()))
                face_images = track_msg[track_id]['face_bgr']
                if not face_images:
                    continue

                end_time = track_msg[track_id]['endt']
                name_list = []
                # BUGFIX: keep the face crops that actually produced a
                # recognition result in a parallel list. Previously, failed
                # crops were skipped in name_list but not in face_images, so
                # the stranger indices and the best-match index pointed at
                # the wrong image (or past the end of valid_faces).
                kept_faces = []

                for img_data in face_images:
                    try:
                        # img_data is a tuple whose [1] element is the BGR crop
                        features, _ = self.face_recog.get_features([img_data[1]])
                        if features is None:
                            continue
                        name, conf = self.face_recog.top1(dbase_name, features[0], add_stranger)
                        name_list.append((name, conf))
                        kept_faces.append(img_data)
                    except Exception as e:
                        self.logger.warning(f"Face recognition failed for track {track_id}: {str(e)}")
                        continue

                valid_names, stranger_indices = self.filter_strangers(name_list, 'stranger')
                stranger_set = set(stranger_indices)
                # kept_faces is parallel to name_list, so filtering both by
                # the same indices keeps valid_faces aligned with valid_names.
                valid_faces = [img for idx, img in enumerate(kept_faces) if idx not in stranger_set]

                if valid_names:
                    best_name, index = self.get_most_frequent_with_highest_confidence(valid_names)
                    # sanitize the label so it is safe to use in a filename
                    safe_name = "".join(c for c in best_name[0] if c.isalnum() or c in (' ', '_'))
                    img_path = save_dir / f"{safe_name}_{track_id}.jpg"

                    try:
                        cv2.imwrite(str(img_path), valid_faces[index][1])
                        person_end_msg = {'name': best_name[0], 'end_time': end_time}
                        self.logger.info(f'task_id:{self.task_id}, track_id:{track_id}, person_end_msg:{person_end_msg}')
                    except Exception as e:
                        self.logger.error(f"Failed to save image for {best_name[0]}: {str(e)}")

        except Exception as e:
            self.logger.error(f"FaceTecognition task {self.task_id} failed: {str(e)}", exc_info=True)
        finally:
            self.logger.info(f"FaceTecognition task {self.task_id} stopped")
               
class VideoCapture(Thread):
    """Frame-grabbing thread.

    Reads frames from a camera, file, or stream via OpenCV (CPU path) or
    cv2.cudacodec (GPU path — presumably requires an OpenCV build with CUDA;
    TODO confirm) and keeps only the most recent frame in a one-slot queue.
    """

    def __init__(self, task_id, stream_url, mode='cpu'):
        """Open the capture device.

        Args:
            task_id: key into gd.logger_dict / gd.rule_info_dict.
            stream_url: camera index '0', a file path, or a stream URL.
            mode: 'cpu' (cv2.VideoCapture) or 'gpu' (cv2.cudacodec reader).

        Raises:
            ValueError: no logger registered for task_id, or unknown mode.
            RuntimeError: the device/stream could not be opened.
        """
        super(VideoCapture, self).__init__()
        self.task_id = task_id
        self.stream_url = str(stream_url)
        self.mode = str(mode).lower()

        # Fail fast if the task was not registered via gd.add_task first.
        if not hasattr(gd, 'logger_dict') or task_id not in gd.logger_dict:
            raise ValueError(f"Logger not found for task_id: {task_id}")

        try:
            if self.mode == 'cpu':
                if self.stream_url == '0':
                    # Local camera: force 4K MJPG @ 30fps.
                    self.cap = cv2.VideoCapture(0)
                    if not self.cap.isOpened():
                        raise RuntimeError("Failed to open camera device 0")

                    self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 3840)
                    self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 2160)
                    self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
                    self.cap.set(cv2.CAP_PROP_FPS, 30)
                else:
                    # File or network stream decoded on the CPU.
                    self.cap = cv2.VideoCapture(self.stream_url)
                    if not self.cap.isOpened():
                        raise RuntimeError(f"Failed to open video stream: {self.stream_url}")

            elif self.mode == 'gpu':
                self.cap = cv2.cudacodec.createVideoReader(self.stream_url)
                if not self.cap:
                    raise RuntimeError(f"Failed to create GPU video reader for: {self.stream_url}")
            else:
                raise ValueError(f'Unsupported mode: {self.mode}')

            self.stopped = False
            self.logger = gd.logger_dict[task_id]
            # Single-consumer queue; run() keeps it at <= 1 frame.
            self.q = queue.Queue()
            self.frame_count = 0

        except Exception as e:
            # Release a half-opened CPU capture before re-raising.
            if hasattr(self, 'cap') and self.cap:
                if self.mode == 'cpu' and self.cap.isOpened():
                    self.cap.release()
            raise RuntimeError(f"VideoCapture initialization failed: {str(e)}")

    # read frames as soon as they are available, keeping only most recent one
    def run(self):
        """Grab loop: decode frames, apply skip_frame_rate, publish latest frame."""
        self.logger.info(f'videocapture task_id {self.task_id} stream_url {self.stream_url} start ------')
        if not self.cap:
            self.logger.error('VideoCapture not initialized')
            return

        mp4_sleep_time = 1/25  # 40 ms: paces file playback at roughly 25 fps
        while not self.stopped:
            try:
                if self.mode == 'cpu':
                    ret, frame = self.cap.read()
                    if not ret: 
                        break
                    # NOTE(review): 'dav' has no leading dot, so any URL merely
                    # ending in 'dav' matches too — confirm this is intended.
                    if self.stream_url.lower().endswith(('.mp4', 'dav')):
                        time.sleep(mp4_sleep_time)
                else:
                    # GPU path: frame arrives as a CUDA BGRA surface; convert,
                    # downscale, then copy back to host memory.
                    ret, frame = self.cap.nextFrame()
                    if not ret: 
                        break
                    frame = cv2.cuda.cvtColor(frame, cv2.COLOR_BGRA2BGR)
                    frame = cv2.cuda.resize(frame, (1920, 1080))
                    frame = frame.download()
            except (cv2.error, RuntimeError) as e:
                self.logger.error(f'Video processing error: {e}')
                break
            except Exception as e:
                self.logger.error(f'Unexpected error: {e}')
                break

            # skip_frame_rate is re-read each iteration so it can be changed
            # at runtime through gd.rule_info_dict.
            skip_rate = gd.rule_info_dict[self.task_id].get('skip_frame_rate', 3)
            if skip_rate <= 0 or (self.frame_count % max(1, skip_rate)) == 0:
                if not self.q.empty():
                    try:
                        self.q.get_nowait()   # discard previous (unprocessed) frame
                    except queue.Empty:
                        pass 
                self.q.put(frame)

            # Keep the counter bounded; with skip_rate <= 0 every frame passes.
            self.frame_count = (self.frame_count + 1) % (10 * skip_rate) if skip_rate > 0 else 0

        self.q.queue.clear()
        if self.mode == 'cpu' and self.cap.isOpened():
            self.cap.release()

        self.stopped = True
        self.logger.info(f'videocapture task_id {self.task_id} stream_url {self.stream_url} stopped ------')

    def stop(self):
        """Request the grab loop to exit; run() does the actual cleanup."""
        self.stopped = True
        self.logger.info(f'stopping videocapture task_id {self.task_id} stream_url {self.stream_url}------')

    def read(self):
        """Return the newest frame, or None once stopped.

        NOTE(review): if stop() is called after the `self.stopped` check but
        before q.get(), this blocks forever on an empty queue — consider a
        timeout; confirm against callers.
        """
        if self.stopped: return None 

        frame = self.q.get()
        return frame

class StreamTask(Thread):
    """Per-task inference thread.

    Pulls frames from its VideoCapture, feeds them through post_process()
    with the pose/face models, and exposes finished-track messages to the
    recognition thread via read().
    """

    def __init__(self, task_id, args, cam_dict, engine_file_path, rule_info):
        """Load the pose and face models and set up per-track bookkeeping."""
        super(StreamTask, self).__init__()

        self.task_id = task_id
        self.args = args
        self.cam = cam_dict[task_id]
        self.logger = gd.logger_dict[task_id]

        self.frame_count = 0
        self.model_path = engine_file_path
        self.yolo11n_pose = YOLO(self.model_path)
        self.yolo_face = YoloFace(args.triton_port, args.face_detec_model_name)
        self.rule_info = rule_info

        # track_id -> accumulated faces and end timestamp for that track
        self.result_details = defaultdict(self.default_entry)

        # queue of finished-track messages consumed by FaceTecognition.read()
        self.q_endt = queue.Queue()
        self.track_id_list = []
        self.end_track_id_list = []

    def default_entry(self):
        """Fresh accumulator entry for a newly seen track id."""
        return {'face_bgr': [], 'endt': None}

    def run(self):
        """Frame loop: runs until the capture stops or yields no frame."""
        self.logger.info(f'stream task {self.task_id} start ------')

        all_face = self.rule_info.get('all_face', False)

        while not self.cam.stopped:
            frame = self.cam.read()
            if frame is None:
                self.logger.error('img is none ------')
                break

            self.frame_count += 1
            gd.heart_beat_dict[self.task_id].beat()

            post_process(frame, self.task_id, self.args, self.logger,
                         self.yolo_face, self.yolo11n_pose, self.rule_info,
                         self.frame_count, self.result_details, self.q_endt,
                         self.track_id_list, self.end_track_id_list)

        self.logger.info(f'stream task {self.task_id} break ------')

    def read(self):
        """Block until a finished-track message is available, then return it."""
        return self.q_endt.get()
