import os
import json
import numpy as np
import chromadb
from sentence_transformers import SentenceTransformer
from sklearn.decomposition import PCA
from typing import Dict, List


class UserInformationVectorizer:
    """Encode user-profile records into vectors whose dimensionality matches
    a pre-built template vector store.

    Pipeline: read template dimension -> load embedding model -> open/rebuild
    a ChromaDB collection of that dimension -> load user JSON -> encode each
    user (PCA-reducing when the model's output dimension differs from the
    template) -> upsert into ChromaDB and export to a JSON file.
    """

    def __init__(
        self,
        model_path: str = "/home/ai/huggingface_cache/BAAI/bge-large-zh-v1.5/bge-large-zh-v1.5",
        chroma_persist_dir: str = "/home/ai/Recommendation_system/chroma_template_db",
        input_json_path: str = "/home/ai/Recommendation_system/database/user_information_data.json",
        output_vector_path: str = "/home/ai/Recommendation_system/user_vector_data.json",
        template_vector_path: str = "/home/ai/Recommendation_system/template_vectors.json"
    ):
        self.model_path = model_path
        self.chroma_persist_dir = chroma_persist_dir
        self.input_json_path = input_json_path
        self.output_vector_path = output_vector_path
        self.template_vector_path = template_vector_path

        # Template dimension must be known before the collection is created,
        # so that the collection metadata can record it.
        self.template_dim = self._get_template_vector_dim()

        # Load the sentence-embedding model.
        self.model = self._load_model()

        # Initialise ChromaDB with a collection matching the template dim.
        self.chroma_client, self.user_collection = self._init_chroma_db()

        self.user_data: List[Dict] = []  # validated user dicts from input JSON
        self.dim_reducer = None          # fitted PCA, set when dims differ

    def _get_template_vector_dim(self) -> int:
        """Return the dimensionality of the first vector found in the
        template-vector JSON file.

        Raises:
            FileNotFoundError: template file missing.
            RuntimeError: file unreadable/unparsable or no vector present.
        """
        if not os.path.exists(self.template_vector_path):
            raise FileNotFoundError(f"模板向量文件不存在：{self.template_vector_path}")

        try:
            with open(self.template_vector_path, 'r', encoding='utf-8') as f:
                data = json.load(f)

            for tpl in data.get("templates", []):
                # Some exports store the vector under "vector_sample".
                vector = tpl.get("vector") or tpl.get("vector_sample")
                if vector:
                    dim = len(vector)
                    print(f"检测到模板向量维度为：{dim}")
                    return dim

            raise ValueError("模板向量文件中未找到有效向量")
        except Exception as e:
            # Chain the original cause so debugging keeps the real traceback.
            raise RuntimeError(f"获取模板向量维度失败：{e}") from e

    def _init_chroma_db(self) -> tuple:
        """Open (or create) the user-embedding collection, rebuilding it when
        its recorded dimension differs from the template dimension.

        Returns:
            (client, collection) tuple.

        Raises:
            Exception: re-raised after logging if ChromaDB cannot be set up.
        """
        try:
            os.makedirs(self.chroma_persist_dir, exist_ok=True)
            client = chromadb.PersistentClient(path=self.chroma_persist_dir)
            collection_name = "user_information_embeddings"
            collection_metadata = {
                "description": f"用户信息向量集合（与模板维度匹配：{self.template_dim}维）",
                "embedding_dimension": self.template_dim
            }

            # The "collection not found" exception class has been renamed
            # across chromadb releases (CollectionNotFoundError /
            # NotFoundError / ValueError), so treat any get_collection
            # failure as "collection does not exist".
            try:
                existing_collection = client.get_collection(name=collection_name)
            except Exception:
                existing_collection = None

            if existing_collection is None:
                print(f"创建新集合（{self.template_dim}维）")
                collection = client.create_collection(
                    name=collection_name,
                    metadata=collection_metadata
                )
            else:
                # metadata may be None for collections created without any;
                # guard before .get() to avoid AttributeError.
                existing_dim = (existing_collection.metadata or {}).get("embedding_dimension")
                if existing_dim != self.template_dim:
                    print(f"现有集合维度不匹配（{existing_dim}维），删除并重建...")
                    client.delete_collection(name=collection_name)
                    collection = client.create_collection(
                        name=collection_name,
                        metadata=collection_metadata
                    )
                else:
                    print(f"使用现有集合（{self.template_dim}维）")
                    collection = existing_collection

            print(f"✓ 成功初始化ChromaDB（{self.template_dim}维）：{self.chroma_persist_dir}")
            return client, collection
        except Exception as e:
            print(f"✗ ChromaDB初始化失败：{e}")
            raise

    def _load_model(self) -> "SentenceTransformer":
        """Load the SentenceTransformer model from the configured path.

        Raises:
            Exception: re-raised after logging if loading fails.
        """
        try:
            model = SentenceTransformer(self.model_path)
            print(f"✓ 成功加载模型：{self.model_path}")
            return model
        except Exception as e:
            print(f"✗ 模型加载失败：{e}")
            raise

    def load_user_information(self) -> bool:
        """Load user records from the input JSON file into self.user_data.

        Only list-of-dict items containing a "user_id" key are kept; other
        items are skipped with a warning.

        Returns:
            True when at least structurally valid data was loaded, else False.
        """
        if not os.path.exists(self.input_json_path):
            print(f"✗ 原始用户信息文件不存在：{self.input_json_path}")
            return False

        try:
            with open(self.input_json_path, 'r', encoding='utf-8') as f:
                data = json.load(f)

            if not isinstance(data, list):
                print(f"✗ 数据格式错误，预期为列表，实际为{type(data)}")
                return False

            valid_data = []
            for item in data:
                if isinstance(item, dict) and "user_id" in item:
                    valid_data.append(item)
                else:
                    print(f"⚠ 跳过无效数据：{item}")

            self.user_data = valid_data
            print(f"✓ 已加载{len(valid_data)}个有效用户信息")
            return True
        except Exception as e:
            print(f"✗ 加载用户信息失败：{e}")
            return False

    def _user_info_to_text(self, user: Dict) -> str:
        """Build the text to embed from a user record.

        Fields whose value is missing/blank are omitted entirely, so an empty
        record yields "" (the caller skips such users). The previous filter
        compared the whole "label：value" string against ""/"："，which never
        matched a bare label like "公司：", letting empty fields leak into the
        embedded text.
        """
        parts = []
        for label, key in (("公司", "company_name"), ("行业", "industry")):
            value = user.get(key)
            text = str(value).strip() if value is not None else ""
            if text:
                parts.append(f"{label}：{text}")
        return "; ".join(parts)

    @staticmethod
    def _sanitize_metadata(user: Dict) -> Dict:
        """Coerce a user record into ChromaDB-legal metadata.

        Chroma metadata values must be str/int/float/bool: None values are
        dropped and nested structures are JSON-encoded, so upsert cannot fail
        on a record that carries lists or sub-dicts.
        """
        clean: Dict = {}
        for key, value in user.items():
            if value is None:
                continue
            if isinstance(value, (str, int, float, bool)):
                clean[key] = value
            else:
                clean[key] = json.dumps(value, ensure_ascii=False)
        return clean

    def _train_dim_reducer(self, all_embeddings: List[np.ndarray]) -> "PCA":
        """Fit a PCA projecting model-output vectors down to template_dim.

        Returns:
            A fitted PCA, or None when the model output already matches the
            template dimension.

        Raises:
            ValueError: no embeddings available, or template_dim cannot be
                reached (PCA requires n_components <= min(n_samples,
                n_features) — it cannot up-project).
        """
        if len(all_embeddings) == 0:
            raise ValueError("没有可用的向量用于训练维度调整模型")

        source_dim = len(all_embeddings[0])
        if source_dim == self.template_dim:
            print(f"模型输出维度已与模板一致（{self.template_dim}维），无需降维")
            return None

        # Fail with a clear message instead of sklearn's generic error.
        max_components = min(len(all_embeddings), source_dim)
        if self.template_dim > max_components:
            raise ValueError(
                f"无法通过PCA将{source_dim}维向量调整为{self.template_dim}维："
                f"n_components不能超过min(样本数={len(all_embeddings)}, 特征数={source_dim})"
            )

        print(f"训练PCA模型：{source_dim}维 → {self.template_dim}维")
        pca = PCA(n_components=self.template_dim)
        pca.fit(np.array(all_embeddings))
        print(f"PCA训练完成，解释方差比：{sum(pca.explained_variance_ratio_):.4f}")
        return pca

    def process_and_store_vectors(self) -> bool:
        """Encode every loaded user, align dimensions with the template,
        upsert vectors into ChromaDB and export them to JSON.

        Returns:
            True on success, False otherwise (errors are logged, not raised).
        """
        if not self.user_data:
            print("✗ 没有可处理的用户数据，请先加载用户信息")
            return False

        try:
            # First pass: encode all non-empty user texts so PCA (if needed)
            # can be fitted on the whole batch at once.
            all_raw_embeddings = []
            user_texts = []
            for user in self.user_data:
                user_text = self._user_info_to_text(user)
                if not user_text:
                    print(f"⚠ 用户{user['user_id']}信息为空，跳过处理")
                    continue
                user_texts.append((user, user_text))

                embedding = self.model.encode(
                    user_text,
                    normalize_embeddings=True,
                    show_progress_bar=False
                )
                all_raw_embeddings.append(embedding)

            self.dim_reducer = self._train_dim_reducer(all_raw_embeddings)

            # Second pass: project (if needed), store and collect for export.
            all_vectors = []
            for (user, user_text), raw_embedding in zip(user_texts, all_raw_embeddings):
                user_id = user["user_id"]

                if self.dim_reducer is not None:
                    embedding = self.dim_reducer.transform([raw_embedding])[0]
                else:
                    embedding = raw_embedding

                embedding_list = embedding.tolist() if isinstance(embedding, np.ndarray) else embedding

                # Chroma only accepts primitive metadata values; sanitize the
                # raw user dict instead of passing it through unchecked.
                self.user_collection.upsert(
                    ids=[str(user_id)],
                    embeddings=[embedding_list],
                    metadatas=[self._sanitize_metadata(user)]
                )

                all_vectors.append({
                    "user_id": user_id,
                    "vector": embedding_list,
                    "original_dim": len(raw_embedding),
                    "target_dim": self.template_dim
                })

                print(f"✓ 已处理用户{user_id}，向量维度：{len(embedding_list)}（与模板匹配）")

            self._export_vectors_to_json(all_vectors)
            print(f"✓ 所有用户向量处理完成，维度均为{self.template_dim}（与模板一致）")
            return True
        except Exception as e:
            print(f"✗ 向量处理失败：{e}")
            return False

    def _export_vectors_to_json(self, vectors: List[Dict]) -> bool:
        """Write the processed vectors to the configured output JSON path.

        Returns:
            True on success, False otherwise (errors are logged, not raised).
        """
        try:
            output_dir = os.path.dirname(self.output_vector_path)
            # dirname is "" for a bare filename; os.makedirs("") would raise.
            if output_dir:
                os.makedirs(output_dir, exist_ok=True)

            with open(self.output_vector_path, 'w', encoding='utf-8') as f:
                json.dump(vectors, f, ensure_ascii=False, indent=2)

            print(f"✓ 向量已导出至：{self.output_vector_path}（维度：{self.template_dim}）")
            return True
        except Exception as e:
            print(f"✗ 向量导出失败：{e}")
            return False


if __name__ == "__main__":
    print("="*50)
    print("用户信息向量转换工具（维度匹配模板）".center(40))
    print("="*50)
    
    vectorizer = UserInformationVectorizer()
    
    print("\n1. 加载原始用户信息...")
    if not vectorizer.load_user_information():
        exit(1)
    
    print("\n2. 生成并存储匹配维度的向量...")
    if not vectorizer.process_and_store_vectors():
        exit(1)
    
    print("\n" + "="*50)
    print("所有操作完成".center(40))
    print("="*50)
