import { defineStore } from "pinia";
import { reactive, computed, shallowRef } from "vue";
import { useRafFn, watchDebounced } from "@vueuse/core";
import { useMqtt } from "@/hooks/useMqtt";
import type { MmC3d_InferenceResult } from "@/types/raw-data";
import type { KonvaGroupConfig } from "@/types/konva";
import { 
	getKonvaListOfSkeletonGroup, 
	updateKonvaConfigBasedOnLatestSize 
} from "@/utils/data-transformers";
import { 
	CAMERA_STREAM_ID, 
	ANOMALY_DETECTION_TYPE,
	SKELETON_RECONSTRUCTION_TYPE,
	HAND_SKELETON_RECONSTRUCTION_TYPE
} from "@/utils/constants";

// Semantic aliases for the string keys used throughout this store.
type Topic = string;
type CameraId = string;
type PipelineId = string;
type InferenceResult = MmC3d_InferenceResult;

// topic -> usage bookkeeping: how many components use the topic, and which
// cameras they are interested in (used to filter incoming MQTT results).
type SubscribedTopicsMap = Map<Topic, { count: number; cameraIds: Set<string> }>;

// pipelineId -> konva configs; nested per camera and per topic below.
type KonvaConfigPipelineEntry = Record<PipelineId, KonvaGroupConfig[]>;
type KonvaConfigByCameraIdObject = Record<CameraId, Record<Topic, KonvaConfigPipelineEntry>>;

// Non-reactive accumulation container: latest raw result per
// camera/topic/pipeline, plus the camera's last known canvas size.
type ResultsBufferCameraEntry = {
	data: Record<Topic, Record<PipelineId, InferenceResult>>;
	rect: { width: number; height: number };
};
type ResultsBuffer = Record<CameraId, ResultsBufferCameraEntry>;

// NOTE(review): neither encoder nor decoder is referenced anywhere in this
// file — confirm they are used by code outside this view or remove them.
const encoder = new TextEncoder(); // text encoder
const decoder = new TextDecoder(); // text decoder

// Pinia setup store: subscribes to MQTT inference topics on demand, buffers
// incoming results off the reactivity system, and publishes konva-ready
// configs at a throttled raf cadence.
export const useInferenceResultStore = defineStore("inferenceResult", () => {
	const { subscribe, unsubscribe, isConnected } = useMqtt();

	// topic -> { count, cameraIds }: which topics components currently need.
	const subscribedTopicsMap = reactive<SubscribedTopicsMap>(new Map());
	// Derived list of desired topics; drives the MQTT subscription sync below.
	const subscribedTopics = computed<Array<Topic>>(() => {
		return Array.from(subscribedTopicsMap.keys());
	});

	// Published output. shallowRef: the whole object is swapped per publish,
	// so deep reactivity would only add overhead.
	const konvaConfigByCameraIdObject = shallowRef<KonvaConfigByCameraIdObject>({});

	/*==================== actions ====================*/

	/**
	 * Register one component's interest in `topic` for `cameraId`.
	 * Creates a fresh entry on first use, otherwise bumps the usage count
	 * and records the camera on the existing entry.
	 */
	const addTopicUsage = (topic: string, cameraId: string) => {
		const existing = subscribedTopicsMap.get(topic);
		if (!existing) {
			subscribedTopicsMap.set(topic, { count: 1, cameraIds: new Set([cameraId]) });
			return;
		}
		existing.count += 1;
		existing.cameraIds.add(cameraId);
	};

	/**
	 * Release one component's usage of `topic` for `cameraId`.
	 * Deletes the topic entirely once its usage count reaches zero;
	 * otherwise decrements the count and drops the camera from the entry.
	 *
	 * NOTE(review): the cameraIds bookkeeping is lossy — if two components
	 * registered the same (topic, cameraId) pair, the first removal deletes
	 * the cameraId while count is still > 0, so subscribeCallbackFn stops
	 * buffering results for the surviving usage. A proper fix needs a
	 * per-camera count in SubscribedTopicsMap — confirm whether duplicate
	 * (topic, cameraId) registrations can occur in practice.
	 */
	const removeTopicUsage = (topic: string, cameraId: string) => {
		const topicEntry = subscribedTopicsMap.get(topic);
		if (topicEntry) {
			const newCount = Math.max(0, topicEntry.count - 1);
			if (newCount === 0) {
				subscribedTopicsMap.delete(topic);
			} else {
				topicEntry.count = newCount;
				topicEntry.cameraIds.delete(cameraId);
			}
		}
	};

	// Publish a freshly processed snapshot (single shallowRef swap).
	const updateResults = (next: KonvaConfigByCameraIdObject): void => {
		konvaConfigByCameraIdObject.value = next;
	};

	// Drop the published snapshot so consumers render nothing.
	const clearResults = (): void => {
		konvaConfigByCameraIdObject.value = {};
	};

	/**
	 * Collect the published konva configs for one camera across several topics.
	 *
	 * @param condition.cameraId - camera whose configs to read
	 * @param condition.topics - topics to merge; on a pipelineId collision the
	 *   later topic in the array wins
	 * @returns pipelineId → KonvaGroupConfig[] map (empty when nothing matches)
	 */
	const getResultsByCondition = (condition: { cameraId: string; topics: string[] }) => {
		const { cameraId, topics } = condition;

		const merged: KonvaConfigPipelineEntry = {};
		const byTopic = konvaConfigByCameraIdObject.value?.[cameraId];

		for (const topic of topics) {
			const pipelineEntry = byTopic?.[topic];
			if (pipelineEntry) {
				Object.assign(merged, pipelineEntry);
			}
		}

		return merged;
	};

	/*==================== update pipeline ====================*/

	// Non-reactive accumulation container; drained by the raf loop below.
	const resultsBuffer: ResultsBuffer = {};
	// Latest timestamp per pipeline, keyed as `${cameraId}-${pipelineId}`.
	const timestampByPipelineIdMap: Map<PipelineId, number> = new Map();
	// Latest rendered canvas size per camera (set via updateCameraRectByCameraId).
	const cameraRectByCameraIdMap: Map<CameraId, { width: number; height: number }> = new Map();

	const PUBLISH_EVERY = 3; // publish once every N raf frames
	const IDLE_CLEAR_MS = 1000; // evict pipelines silent for this long (ms)
	let rafCount = 0; // frame counter for the publish throttle

	// Throttled processing loop: every PUBLISH_EVERY frames, evict stale
	// buffer entries and publish the rest. Started/stopped by
	// syncMqttSubscriptions; `immediate: false` keeps it paused until then.
	const { pause: pauseRafFn, resume: resumeRafFn } = useRafFn(
		(args: { timestamp: number }) => {
			if (++rafCount >= PUBLISH_EVERY) {
				rafCount = 0;
				cleanupStaleResultsBuffer(args.timestamp); // drop entries idle past IDLE_CLEAR_MS
				sendResultsToWorker(resultsBuffer); // process + publish the buffer
			}
		},
		{ immediate: false }
	);

	/**
	 * Evict buffered results whose pipeline has not reported within
	 * IDLE_CLEAR_MS, then prune topic/camera entries left empty.
	 *
	 * @param now - current frame timestamp (same clock as performance.now())
	 */
	const cleanupStaleResultsBuffer = (now: number) => {
		for (const cameraId of Object.keys(resultsBuffer)) {
			const cameraEntry = resultsBuffer[cameraId];

			for (const topic of Object.keys(cameraEntry.data)) {
				const pipelines = cameraEntry.data[topic];

				for (const pipelineId of Object.keys(pipelines)) {
					const lastSeen =
						timestampByPipelineIdMap.get(`${cameraId}-${pipelineId}`) ?? 0;
					if (now - lastSeen > IDLE_CLEAR_MS) {
						delete pipelines[pipelineId];
					}
				}

				if (Object.keys(pipelines).length === 0) {
					delete cameraEntry.data[topic];
				}
			}

			if (Object.keys(cameraEntry.data).length === 0) {
				delete resultsBuffer[cameraId];
			}
		}
	};

	/**
	 * Transform the buffered inference results and publish them.
	 * Named after the intended worker-based design; currently everything
	 * runs inline on the main thread.
	 *
	 * @param results - the accumulated resultsBuffer
	 */
	const sendResultsToWorker = (results: ResultsBuffer) => {
		try {
			// Simplified: process directly instead of posting to a Worker.
			// A production build should offload this work to a Web Worker.
			updateResults(processResults(results));
		} catch (error) {
			console.error(error);
		}
	};

	/**
	 * 处理推理结果
	 */
	const processResults = (results: ResultsBuffer): KonvaConfigByCameraIdObject => {
		const output: KonvaConfigByCameraIdObject = {};
		
		for (const [cameraId, cameraEntry] of Object.entries(results)) {
			output[cameraId] = {};
			const { rect } = cameraEntry;
			
			for (const [topic, topicEntry] of Object.entries(cameraEntry.data)) {
				output[cameraId][topic] = {};
				
				for (const [pipelineId, result] of Object.entries(topicEntry)) {
					let konvaList: KonvaGroupConfig[] = [];
					
					// 根据不同的topic类型处理数据
					if (topic === CAMERA_STREAM_ID) {
						// 目标检测
						const { konvaList: targetList } = getKonvaListOfSkeletonGroup(result, "target");
						konvaList = targetList;
					} else if (topic === SKELETON_RECONSTRUCTION_TYPE) {
						// 姿态估计
						const { konvaList: poseList } = getKonvaListOfSkeletonGroup(result, "pose");
						konvaList = poseList;
					} else if (topic === HAND_SKELETON_RECONSTRUCTION_TYPE) {
						// 手部关节
						const { konvaList: handList } = getKonvaListOfSkeletonGroup(result, "hand");
						konvaList = handList;
					}
					
					// 根据当前画面尺寸更新坐标
					if (rect.width > 0 && rect.height > 0) {
						konvaList = updateKonvaConfigBasedOnLatestSize(konvaList, rect.width, rect.height);
					}
					
					output[cameraId][topic][pipelineId] = konvaList;
				}
			}
		}
		
		return output;
	};

	/**
	 * Record the latest rendered size of a camera's canvas; processResults
	 * uses it to scale konva configs to the on-screen dimensions.
	 *
	 * @param cameraId - camera whose canvas was (re)sized
	 * @param rect - current width/height in pixels
	 */
	const updateCameraRectByCameraId = (
		cameraId: string,
		rect: { width: number; height: number }
	): void => {
		cameraRectByCameraIdMap.set(cameraId, rect);
	};

	/**
	 * MQTT subscribe callback: buffers incoming inference results.
	 *
	 * Parses the JSON payload and, for every batched result whose camera is
	 * registered on this topic, stores the result in the non-reactive
	 * resultsBuffer along with the camera's latest canvas size. The raf loop
	 * drains and publishes the buffer asynchronously.
	 *
	 * @param topic - MQTT topic the message arrived on
	 * @param message - raw JSON payload
	 */
	const subscribeCallbackFn = (topic: string, message: string) => {
		// Ignore messages for topics no component is using anymore.
		const topicEntry = subscribedTopicsMap.get(topic);
		if (!topicEntry) return;

		try {
			const rawData = JSON.parse(message);
			const timestamp = performance.now();

			rawData?.batched_results?.forEach((result: InferenceResult) => {
				const { camera_id, pipeline_id, results } = result;

				// Only buffer results for cameras registered on this topic,
				// and skip empty result sets. `?? 0` keeps the comparison
				// well-typed when `results` is undefined.
				if (!topicEntry.cameraIds.has(camera_id) || !((results?.length ?? 0) > 0)) {
					return;
				}

				// Latest known canvas size for this camera (0x0 until reported).
				const rect = cameraRectByCameraIdMap.get(camera_id) ?? { width: 0, height: 0 };

				// Initialize the camera entry with a complete, correctly typed
				// shape (the previous `{ data: {} } as any` hid a missing
				// `rect` field), then refresh the rect on every message.
				const cameraEntry = (resultsBuffer[camera_id] ??= { data: {}, rect });
				cameraEntry.rect = rect;
				cameraEntry.data[topic] ??= {};
				cameraEntry.data[topic][pipeline_id] = result;
				// Stamp the pipeline so the stale-cleanup pass can expire it.
				timestampByPipelineIdMap.set(`${camera_id}-${pipeline_id}`, timestamp);
			});
		} catch (error) {
			console.warn("Handle MQTT message failed", error);
		}
	};

	// Topics we believe are currently subscribed at the broker.
	let currentSubscribed = new Set<string>();
	// Previous connection state; distinguishes a fresh (re)connect, where the
	// broker-side baseline must be treated as empty.
	let wasConnected = false;

	/**
	 * 同步MQTT订阅
	 *
	 * @param {[boolean, Array<string>]} [connected, topics]
	 */
	const syncMqttSubscriptions = ([connected, topics]: [boolean, Array<string>]) => {
		if (connected) {
			// 若刚刚从未连接 -> 已连接，视为基线为空，需要把期望主题全部订阅上
			const baseline = wasConnected ? currentSubscribed : new Set<string>();

			const toAdd = topics.filter((t) => !baseline.has(t));
			const toRemove = Array.from(baseline).filter((t) => !topics.includes(t));

			toAdd.forEach((topic) => subscribe(topic, subscribeCallbackFn));
			toRemove.forEach((topic) => unsubscribe(topic, subscribeCallbackFn));

			currentSubscribed = new Set(topics);

			if (currentSubscribed.size > 0) {
				resumeRafFn(); // 恢复raf
			} else {
				pauseRafFn(); // 暂停raf
				rafCount = 0; // 重置发布节流计数
				clearResults(); // 清空对外数据
			}
		} else {
			pauseRafFn();
			rafCount = 0;
			clearResults();
		}

		wasConnected = connected;
	};

	// React to connection-state and desired-topic changes; debounced 200ms to
	// coalesce bursts of mount/unmount churn. `immediate: true` runs the sync
	// once on store creation.
	watchDebounced([isConnected, subscribedTopics], syncMqttSubscriptions, {
		debounce: 200,
		immediate: true,
	});

	// Public store API: topic usage registration drives MQTT subscriptions;
	// getResultsByCondition reads published configs; updateCameraRectByCameraId
	// feeds canvas sizes for coordinate scaling.
	return {
		subscribedTopics,
		addTopicUsage,
		removeTopicUsage,
		getResultsByCondition,
		updateCameraRectByCameraId,
	};
});

/**
 * Convenience accessor for the inference-result store, for call sites that
 * prefer a hook-style entry point.
 */
export function useInferenceResultStoreHook() {
	return useInferenceResultStore();
}

