import os
import sqlite3
from datetime import datetime
import threading
import cv2
import chromadb
import torch
from chromadb.config import Settings
from typing import List, Union
import numpy as np
import redis

class DatabaseManager:
    """SQLite-backed store for the pedestrian re-id gallery and trajectories.

    A connection/cursor pair is kept per thread via ``threading.local`` so a
    single instance can be shared safely across worker threads.
    """

    def __init__(self, db_name='CorridorPedestrianReid.db'):
        # Anchor all paths to the directory containing this script.
        self.base_dir = os.path.dirname(os.path.abspath(__file__))
        self.gallery_dir = os.path.join(self.base_dir, "gallery")

        # Make sure the gallery image directory exists.
        os.makedirs(self.gallery_dir, exist_ok=True)

        # Resolve the database file path.
        self.db_path = os.path.join(self.base_dir, db_name)
        print(f"数据库路径: {self.db_path}")

        # Thread-local storage: one connection/cursor per thread.
        self.local = threading.local()

        # Create the tables up front.
        self._create_table_if_not_exists()

    def _get_conn(self):
        """Return the (connection, cursor) pair for the current thread,
        creating them lazily on first use (thread-safe)."""
        if not hasattr(self.local, 'conn'):
            self.local.conn = sqlite3.connect(self.db_path)
            self.local.cursor = self.local.conn.cursor()
        return self.local.conn, self.local.cursor

    def _create_table_if_not_exists(self):
        """Create the gallery and person_trajectory tables if absent."""
        conn, cursor = self._get_conn()
        cursor.execute('''
        CREATE TABLE IF NOT EXISTS gallery (
            pid INTEGER PRIMARY KEY AUTOINCREMENT,
            path TEXT,
            time TEXT NOT NULL
        )
        ''')
        # person_trajectory: historical sightings of each person per camera.
        cursor.execute('''
        CREATE TABLE IF NOT EXISTS person_trajectory (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            pid INTEGER NOT NULL,
            camera_id INTEGER NOT NULL,
            time TEXT NOT NULL,  -- 格式：YYYY-MM-DD-HH-MM-SS
            FOREIGN KEY(pid) REFERENCES gallery(pid)
        )
        ''')
        conn.commit()

    def save_person_image(self, crop_imgs: List[np.ndarray]) -> List[int]:
        """Persist cropped person images and register them in the gallery.

        Args:
            crop_imgs: cropped images (OpenCV BGR ndarrays).

        Returns:
            The auto-assigned pid for each image, in input order.

        Raises:
            OSError: if an image file cannot be written.
            sqlite3.Error: on database failure (after rollback).
        """
        conn, cursor = self._get_conn()
        try:
            pids = []
            current_time = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

            # 1. Insert one placeholder row per image to obtain its pid.
            for _ in crop_imgs:
                cursor.execute(
                    'INSERT INTO gallery (time) VALUES (?)',
                    (current_time,)
                )
                pids.append(cursor.lastrowid)

            # 2. Write each image to disk, named after its pid.
            update_data = []
            for pid, img in zip(pids, crop_imgs):
                filename = f"{pid}.jpg"
                rel_path = os.path.join("gallery", filename)
                abs_path = os.path.join(self.gallery_dir, filename)

                # FIX: check the write result instead of silently leaving a
                # row whose path points at a missing file.
                if not cv2.imwrite(abs_path, img):
                    raise IOError(f"Failed to write image file: {abs_path}")
                # Store forward-slash relative paths for portability.
                update_data.append((rel_path.replace("\\", "/"), pid))

            # 3. Batch-update the path column.
            cursor.executemany(
                'UPDATE gallery SET path = ? WHERE pid = ?',
                update_data
            )
            conn.commit()
            return pids
        except Exception:
            conn.rollback()
            raise  # FIX: bare raise preserves the original traceback

    def get_all_images(self):
        """Return all (pid, path) rows from the gallery table (thread-safe)."""
        _, cursor = self._get_conn()
        cursor.execute('SELECT pid, path FROM gallery')
        return cursor.fetchall()

    def insert_trajectory_records(self, pids, camera_id):
        """Batch-insert pedestrian trajectory records.

        Args:
            pids: person id list, e.g. [1, 2, 3].
            camera_id: id of the camera that produced the sighting.

        Returns:
            Number of records inserted (0 on empty input or failure).
        """
        if not pids:
            return 0

        conn, cursor = self._get_conn()
        current_time = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

        try:
            # Batch insert via executemany.
            records = [(pid, camera_id, current_time) for pid in pids]
            cursor.executemany('''
            INSERT INTO person_trajectory (pid, camera_id, time)
            VALUES (?, ?, ?)
            ''', records)

            conn.commit()
            # FIX: the docstring promised a count but the original
            # implementation returned None on every path.
            return len(records)
        except Exception as e:
            conn.rollback()
            print(f"插入轨迹记录失败: {e}")
            return 0

    def close_current_connection(self):
        """Close and discard the current thread's database connection."""
        if hasattr(self.local, 'conn'):
            self.local.conn.close()
            del self.local.conn
            del self.local.cursor

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Only releases the connection owned by the exiting thread.
        self.close_current_connection()


class VectorDatabase:
    """Chroma-backed persistent store of pedestrian embeddings (L2 metric)."""

    def __init__(self, db_path: str = "./Chroma"):
        """Initialise the persistent vector database.

        Args:
            db_path: directory used by Chroma for persistent storage.
        """
        # allow_reset=True permits wiping the database programmatically via
        # client.reset(), i.e. dropping every collection and record so the
        # store returns to a pristine state.
        self.client = chromadb.PersistentClient(
            path=db_path,
            settings=Settings(allow_reset=True, anonymized_telemetry=False)
        )

        # Create or fetch the collection, using L2 distance for the HNSW index.
        self.collection = self.client.get_or_create_collection(
            name="pedestrian_embeddings",
            metadata={"hnsw:space": "l2"}
        )

    @staticmethod
    def _normalize_embeddings(
            embeddings: Union[torch.Tensor, np.ndarray, List[List[float]]]
    ) -> List[List[float]]:
        """Convert tensor / ndarray / nested-list input into the 2-D python
        list format Chroma expects; a single 1-D vector becomes a batch of one.
        """
        if isinstance(embeddings, torch.Tensor):
            embeddings = embeddings.detach().cpu().numpy()
        elif isinstance(embeddings, list):
            embeddings = np.array(embeddings)

        # Ensure shape (batch_size, feature_dim).
        if embeddings.ndim == 1:
            embeddings = np.expand_dims(embeddings, 0)

        return embeddings.tolist()

    def add_person(self,
                   embeddings: Union[torch.Tensor, np.ndarray, List[List[float]]],
                   person_ids: Union[List[int], List[str]]):
        """Add pedestrian feature vectors to the database.

        Args:
            embeddings: torch.Tensor (raw model output), ndarray, or a 2-D
                        list of floats.
            person_ids: identity ids aligned with the embeddings.
        """
        embeddings = self._normalize_embeddings(embeddings)

        # Chroma requires string ids.
        person_ids = [str(pid) for pid in person_ids]

        self.collection.add(
            embeddings=embeddings,
            ids=person_ids,
        )

    def search_person(self,
                      query_embeddings: Union[torch.Tensor, np.ndarray, List[List[float]]],
                      top_k: int = 1,
                      threshold: float = 1.0) -> List[List[str]]:
        """Search for similar pedestrians.

        Args:
            query_embeddings: torch.Tensor (raw model output), ndarray, or a
                              2-D list of floats.
            top_k: number of nearest neighbours to retrieve per query.
            threshold: maximum L2 distance for a hit.

        Returns:
            One sub-list per query vector containing the matching person ids
            (possibly empty when nothing is within the threshold).
        """
        query_embeddings = self._normalize_embeddings(query_embeddings)

        results = self.collection.query(
            query_embeddings=query_embeddings,
            n_results=top_k,
            include=["distances"]
        )
        # FIX: removed stray debug print(results).

        filtered_ids = []
        for ids, dists in zip(results["ids"], results["distances"]):
            matched = [id_ for id_, dist in zip(ids, dists) if dist <= threshold]
            filtered_ids.append(matched)

        return filtered_ids

    def get_all_persons(self) -> dict:
        """Fetch every stored record.

        Returns:
            dict with:
                'ids': stored person id strings
                'embeddings': stored feature vectors
        """
        # FIX: docstring previously advertised a 'person_ids' key that was
        # never returned.
        data = self.collection.get(include=["embeddings"])

        return {
            "ids": data["ids"],
            "embeddings": data["embeddings"],
        }


class CacheDatabase:
    """Redis-backed short-lived cache of pedestrian feature vectors.

    Vectors are stored as raw float32 bytes keyed by person id with a TTL,
    and queried by L2 nearest-neighbour against every cached entry.
    """

    def __init__(self, host='localhost', port=6379, db=0, threshold=1.0,
                 expire_time=3600, use_gpu=True):
        # threshold: maximum L2 distance for a match.
        # expire_time: per-key TTL in seconds.
        self.redis_conn = redis.Redis(host=host, port=port, db=db)
        self.threshold = threshold
        self.expire_time = expire_time
        self.use_gpu = use_gpu
        self.device = torch.device('cuda' if use_gpu and torch.cuda.is_available() else 'cpu')

    def _to_numpy(self, data: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
        """Convert a tensor to an ndarray; pass ndarrays through unchanged."""
        if isinstance(data, torch.Tensor):
            return data.detach().cpu().numpy()
        return data

    def _to_bytes(self, vector: Union[torch.Tensor, np.ndarray]) -> bytes:
        """Serialise a vector as raw float32 bytes for Redis storage."""
        np_vec = self._to_numpy(vector).astype(np.float32)
        return np_vec.tobytes()

    def _from_bytes(self, byte_data: bytes) -> np.ndarray:
        """Deserialise raw float32 bytes back into a 1-D ndarray.

        Raises:
            ValueError: on empty data or a length that is not a multiple of 4.
        """
        if not byte_data or len(byte_data) % 4 != 0:
            raise ValueError(f"Invalid byte length: {len(byte_data)}")
        return np.frombuffer(byte_data, dtype=np.float32)

    def add_persons(self, ids: Union[List[str], List[int]], vectors: Union[np.ndarray, torch.Tensor]):
        """Batch-add pedestrian features to Redis.

        Args:
            ids: person id list (strings or ints).
            vectors: [N, D] tensor or ndarray, aligned with ids.

        Raises:
            ValueError: when the id count does not match the vector count.
        """
        vectors = self._to_numpy(vectors)
        if len(ids) != vectors.shape[0]:
            raise ValueError("IDs 数量与向量数量不一致")

        # One pipelined round-trip for all SET + EXPIRE commands.
        with self.redis_conn.pipeline() as pipe:
            for person_id, vec in zip(ids, vectors):
                pipe.set(str(person_id), self._to_bytes(vec))
                pipe.expire(str(person_id), self.expire_time)
            pipe.execute()

    def query_persons(self, query_feats: Union[torch.Tensor, np.ndarray]) -> List[Union[str, int]]:
        """Find the closest cached person id for each query vector.

        Args:
            query_feats: [N, D] tensor or ndarray (a single 1-D vector is
                         also accepted and treated as a batch of one).

        Returns:
            For each query vector, the best-matching id (converted to int
            when the key is numeric), or 0 when no entry is within
            ``self.threshold``. Empty input yields [].
        """
        if isinstance(query_feats, torch.Tensor):
            if query_feats.numel() == 0:
                return []
        elif isinstance(query_feats, np.ndarray):
            if query_feats.size == 0:
                return []

        db_vectors = []
        db_ids = []
        # FIX: SCAN instead of KEYS — KEYS blocks the Redis server on large
        # databases; scan_iter pages through the keyspace incrementally.
        for key in self.redis_conn.scan_iter('*'):
            try:
                byte_data = self.redis_conn.get(key)
                if byte_data is None:
                    # Key expired between SCAN and GET.
                    continue
                vec = self._from_bytes(byte_data)
            except (ValueError, TypeError) as e:
                print(f"[WARNING] 跳过非法 Redis 条目 {key}: {e}")
                continue
            db_vectors.append(vec)
            db_ids.append(key.decode('utf-8') if isinstance(key, bytes) else key)

        if not db_vectors:
            return [0] * query_feats.shape[0]

        db_np = np.stack(db_vectors)
        query_np = self._to_numpy(query_feats)
        if query_np.ndim == 1:
            # Promote a single vector to a batch of one.
            query_np = query_np[np.newaxis, :]

        if self.use_gpu and torch.cuda.is_available():
            db_tensor = torch.tensor(db_np, device=self.device)
            query_tensor = torch.tensor(query_np, device=self.device)
            distances = torch.cdist(query_tensor, db_tensor, p=2)
            min_distances, min_indices = torch.min(distances, dim=1)
            min_distances = min_distances.cpu().numpy()
            min_indices = min_indices.cpu().numpy()
        else:
            # Pairwise L2 distances via broadcasting: (N, 1, D) - (M, D).
            distances = np.linalg.norm(query_np[:, np.newaxis] - db_np, axis=2)
            min_indices = np.argmin(distances, axis=1)
            min_distances = distances[np.arange(query_np.shape[0]), min_indices]

        results = []
        for i in range(query_np.shape[0]):
            if min_distances[i] < self.threshold:
                matched_id = db_ids[min_indices[i]]
                try:
                    # Restore numeric ids as ints; keep others as strings.
                    matched_id = int(matched_id)
                except ValueError:  # FIX: bare except hid real errors
                    pass
                results.append(matched_id)
            else:
                results.append(0)

        return results
