import logging
import jieba
from urllib.parse import quote
import os
from sklearn.metrics.pairwise import cosine_similarity
import random
import torch
from transformers import CLIPProcessor, CLIPModel
import numpy as np
from pathlib import Path
from collections import defaultdict

# Configure logging
logger = logging.getLogger(__name__)

class SearchModule:
    """Semantic text search over a local garbage-classification image dataset.

    Combines a locally stored CLIP model (text feature extraction), jieba
    word segmentation, and a hand-built Chinese semantic-mapping dictionary
    to match free-text queries against dataset category names.
    """

    # Upper bound on the number of images returned by search_by_text.
    MAX_RESULTS = 16

    def __init__(self, dataset_path):
        """Load the local CLIP model and build the semantic vocabulary.

        Args:
            dataset_path: Root directory of the image dataset; used to turn
                absolute image paths into dataset-relative URL paths.

        Raises:
            RuntimeError: if the local CLIP model files cannot be loaded.
        """
        self.dataset_path = dataset_path
        # Prefer GPU for CLIP inference when one is available.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        # Load the model from a local checkout — no network download.
        model_path = Path("models/clip-vit-base-patch32")
        try:
            self.model = CLIPModel.from_pretrained(str(model_path))
            self.processor = CLIPProcessor.from_pretrained(str(model_path))
            self.model.to(self.device)
            logger.info("成功加载本地CLIP模型")
        except Exception as e:
            logger.error(f"加载本地模型失败: {str(e)}")
            # Chain the original failure so the root cause stays visible.
            raise RuntimeError("请确保models/clip-vit-base-patch32目录下包含所需的模型文件") from e

        # Semantic mapping built from the actual dataset vocabulary:
        # maps a query concept to the concrete class names it should match.
        self.semantic_mapping = {
            # Hazardous waste
            "有害垃圾": ["LED灯泡", "保健品瓶", "口服液瓶", "指甲油", "杀虫剂", "温度计", "滴眼液瓶",
                     "玻璃灯管", "电池", "电池板", "碘伏空瓶", "红花油", "纽扣电池", "胶水", "药品包装",
                     "药片", "药膏", "蓄电池", "血压计"],

            # Kitchen / food waste
            "厨余垃圾": ["八宝粥", "巴旦木", "白菜", "白萝卜", "板栗", "贝果", "冰糖葫芦", "饼干",
                     "菠萝", "菠萝包", "菠萝蜜", "残渣剩饭", "蚕豆", "草莓", "茶叶", "肠(火腿)",
                     "陈皮", "橙子", "蛋", "蛋挞", "番茄", "粉条", "甘蔗", "糕点", "骨肉相连",
                     "瓜子", "果冻", "哈密瓜", "汉堡", "核桃", "荷包蛋", "红豆", "胡萝卜", "花生皮",
                     "火龙果", "鸡翅", "咖啡渣", "开心果", "烤鸡烤鸭", "辣椒", "梨", "蘑菇", "牛肉干",
                     "苹果", "普通面包", "青菜", "生肉", "圣女果", "薯条", "蒜", "西瓜皮", "香蕉皮",
                     "炸鸡"],

            # Recyclables
            "可回收物": ["八宝粥罐", "芭比娃娃", "保温杯", "保鲜盒", "报纸", "变形玩具", "玻璃壶",
                     "玻璃器皿", "玻璃球", "餐垫", "叉子", "插线板", "茶叶罐", "车钥匙", "尺子",
                     "充电宝", "充电头", "充电线", "吹风机", "搓衣板", "打气筒", "打印机墨盒",
                     "单肩包手提包", "蛋糕盒", "档案袋", "刀", "地球仪", "地铁票", "灯罩", "登机牌",
                     "电动卷发棒", "电动剃须刀", "电动牙刷", "电路板", "电视遥控器", "电熨斗", "垫子",
                     "钉子", "订书机", "耳钉耳环", "耳机", "耳套", "放大镜"],

            # Other (residual) waste
            "其他垃圾": ["PE塑料袋", "U型回形针", "一次性杯子", "一次性棉签", "串串竹签", "便利贴",
                     "创可贴", "厨房手套", "口罩", "唱片", "图钉", "大龙虾头", "奶茶杯", "干果壳",
                     "干燥剂", "打泡网", "打火机", "放大镜", "毛巾", "涂改带", "湿纸巾", "烟蒂",
                     "牙刷", "百洁布", "眼镜", "票据", "空调滤芯", "笔及笔芯", "纸巾", "胶带",
                     "胶水废包装", "苍蝇拍", "茶壶碎片", "餐盒", "验孕棒", "鸡毛掸"],

            # Scene / usage categories
            "厨房": ["八宝粥", "保温杯", "保鲜盒", "叉子", "茶叶罐", "刀", "锅", "咖啡渣", "餐盒",
                   "餐垫", "茶叶"],
            "办公": ["U型回形针", "便利贴", "订书机", "档案袋", "打印机墨盒", "地铁票", "登机牌"],
            "卫生间": ["牙刷", "毛巾", "湿纸巾", "纸巾", "一次性棉签"],
            "电子产品": ["充电宝", "充电头", "充电线", "电动卷发棒", "电动剃须刀", "电动牙刷", "电路板",
                     "电视遥控器", "电熨斗"],

            # Material categories
            "玻璃": ["玻璃壶", "玻璃器皿", "玻璃球"],
            "塑料": ["PE塑料袋", "保鲜盒"],
            "金属": ["保温杯", "叉子", "刀", "钉子"],
            "纸张": ["报纸", "档案袋", "纸巾", "票据"],

            # Food categories
            "水果": ["火龙果", "草莓", "橙子", "梨", "苹果", "菠萝", "菠萝蜜", "圣女果", "哈密瓜"],
            "面点": ["普通面包", "贝果", "菠萝包", "饼干", "糕点"],
            "坚果": ["巴旦木", "板栗", "核桃", "开心果", "花生皮", "瓜子"],
            "肉类": ["肠(火腿)", "骨肉相连", "鸡翅", "烤鸡烤鸭", "牛肉干", "炸鸡", "生肉"],
            "蔬菜": ["白菜", "白萝卜", "胡萝卜", "青菜", "蒜", "辣椒", "番茄", "蘑菇"],

            # Nutrient categories
            "维生素C": ["火龙果", "草莓", "橙子", "圣女果", "番茄", "柑橘", "柚子"],
            "维生素A": ["胡萝卜", "番茄", "青菜", "白菜", "菠菜"],
            "维生素B": ["香蕉", "白面包", "普通面包", "菠萝包", "贝果"],
            "维生素D": ["蘑菇", "鸡蛋", "蛋"],
            "维生素E": ["花生皮", "坚果", "巴旦木", "核桃", "开心果"],
            "蛋白质": ["肠(火腿)", "生肉", "骨肉相连", "鸡翅", "炸鸡", "烤鸡烤鸭"],
            "膳食纤维": ["白菜", "青菜", "白萝卜", "胡萝卜", "辣椒", "蘑菇"],

            # Color-feature categories
            "红色垃圾": ["草莓", "番茄", "火龙果", "圣女果", "红豆", "苹果", "红色塑料袋", "红色饮料瓶"],
            "绿色垃圾": ["青菜", "白菜", "辣椒", "西瓜皮", "青苹果", "绿色塑料袋", "绿色饮料瓶"],
            "黄色垃圾": ["香蕉皮", "橙子", "柚子", "梨", "菠萝", "黄色塑料袋"],
            "白色垃圾": ["白萝卜", "蘑菇", "蒜", "纸巾", "一次性杯子", "餐盒", "泡沫盒子"],
            "黑色垃圾": ["黑色塑料袋", "黑色饮料瓶", "墨盒", "打印机墨盒"],
            "透明垃圾": ["玻璃壶", "玻璃器皿", "玻璃球", "塑料瓶"],
            "金属色垃圾": ["易拉罐", "金属制品", "保温杯", "不锈钢餐具", "刀", "叉子", "勺子"],

            # Common packaging colors
            "红色包装": ["可乐罐", "红色易拉罐", "红色饮料瓶", "红色塑料袋"],
            "蓝色包装": ["蓝色易拉罐", "蓝色饮料瓶", "蓝色塑料袋"],
            "绿色包装": ["绿色易拉罐", "绿色饮料瓶", "绿色塑料袋"],
            "透明包装": ["透明塑料瓶", "透明塑料袋", "玻璃瓶"],

            # Material-feature categories
            "玻璃制品": ["玻璃壶", "玻璃器皿", "玻璃球", "LED灯泡", "玻璃灯管"],
            "塑料制品": ["塑料瓶", "塑料袋", "PE塑料袋", "保鲜盒", "一次性餐具", "一次性杯子"],
            "金属制品": ["易拉罐", "八宝粥罐", "保温杯", "刀", "叉子", "勺子", "金属制品"],
            "纸制品": ["报纸", "纸巾", "餐巾纸", "纸盒", "蛋糕盒", "档案袋"],

            # Waste-origin scenes
            "厨房垃圾": ["剩饭剩菜", "果皮", "菜叶", "蛋壳", "餐巾纸", "一次性餐具"],
            "办公垃圾": ["纸张", "笔", "文件", "打印纸", "订书钉", "回形针"],
            "卫生间垃圾": ["卫生纸", "纸巾", "牙刷", "肥皂盒", "沐浴露瓶"],
            "客厅垃圾": ["果皮", "零食包装", "饮料瓶", "纸巾"]
        }

        # Register custom vocabulary with jieba.
        self.add_custom_words()

    def add_custom_words(self):
        """Register custom vocabulary with the jieba tokenizer.

        Adds the four main categories, common query word groups, and
        nutrition-related groups so jieba segments them as single tokens,
        and extends ``self.semantic_mapping`` with any group not already
        present.
        """
        # Main categories.
        for category in ("有害垃圾", "厨余垃圾", "可回收物", "其他垃圾"):
            jieba.add_word(category)

        # Common search word groups.
        word_groups = {
            # Frequent query combinations
            "充电": ["充电宝", "充电头", "充电线"],
            "餐饮": ["八宝粥", "八宝粥罐", "餐盒", "餐垫", "叉子"],
            "清洁": ["厨房手套", "百洁布", "打泡网", "牙刷", "毛巾"],
            "办公": ["U型回形针", "订书机", "打印机墨盒", "便利贴", "档案袋"],
            "电子电器": ["电视遥控器", "空调遥控器", "电动剃须刀", "电动牙刷", "电路板"],

            # Food combinations
            "主食": ["八宝粥", "普通面包", "贝果", "菠萝包", "饼干", "糕点"],
            "水果": ["火龙果", "草莓", "橙子", "梨", "苹果", "菠萝", "菠萝蜜", "圣女果", "哈密瓜"],
            "蔬菜": ["白菜", "白萝卜", "胡萝卜", "青菜", "蒜", "辣椒", "番茄", "蘑菇"],
            "肉类": ["肠(火腿)", "生肉", "骨肉相连", "烤鸡烤鸭", "炸鸡", "鸡翅", "牛肉干"],
            "坚果": ["巴旦木", "板栗", "核桃", "开心果", "花生皮", "瓜子"],
            "零食": ["冰糖葫芦", "果冻", "瓜子", "薯条", "牛肉干"],

            # Material combinations
            "玻璃": ["玻璃壶", "玻璃器皿", "玻璃球", "玻璃灯管"],
            "塑料": ["PE塑料袋", "保鲜盒", "塑料瓶", "塑料盆"],
            "金属": ["保温杯", "刀", "叉子", "勺子", "易拉罐", "奶粉罐"],
            "纸制品": ["报纸", "纸牌", "纸箱", "蛋糕盒", "购物纸袋", "纸巾"],

            # Scene combinations
            "厨房用品": ["保温杯", "保鲜盒", "刀", "叉子", "锅", "锅盖", "餐具"],
            "卫浴用品": ["牙刷", "电动牙刷", "毛巾", "一次性棉签"],
            "书房文具": ["尺子", "订书机", "笔及笔芯", "便利贴"],
            "客厅电器": ["遥控器", "电视遥控器", "空调遥控器"],

            # Dangerous-item combinations
            "危险": ["LED灯泡", "电池", "电池板", "蓄电池", "纽扣电池"],
            "药品": ["保健品瓶", "口服液瓶", "滴眼液瓶", "碘伏空瓶", "药品包装", "药片", "药膏"],
            "化学用品": ["指甲油", "杀虫剂", "红花油", "胶水", "干燥剂"]
        }

        # Nutrition-related word groups.
        nutrition_words = {
            "维生素C": ["火龙果", "草莓", "橙子", "圣女果"],
            "维生素A": ["胡萝卜", "番茄", "青菜"],
            "维生素B": ["面包", "普通面包", "菠萝包"],
            "维生素D": ["蘑菇", "鸡蛋"],
            "维生素E": ["花生", "坚果", "核桃"],
            "蛋白质": ["肉类", "鸡肉", "火腿"],
            "膳食纤维": ["蔬菜", "水果"]
        }

        # Register nutrition groups first, then the general word groups,
        # matching the original registration order (first writer wins for
        # semantic_mapping entries).
        self._register_word_groups(nutrition_words)
        self._register_word_groups(word_groups)

    def _register_word_groups(self, groups):
        """Add each group name and member word to jieba; extend the
        semantic mapping with groups that are not already mapped."""
        for group_name, words in groups.items():
            jieba.add_word(group_name)
            for word in words:
                jieba.add_word(word)
            if group_name not in self.semantic_mapping:
                self.semantic_mapping[group_name] = words

    def get_clip_features(self, text):
        """Return CLIP text features for ``text`` as a NumPy array.

        Args:
            text: A string (or list of strings) to encode.

        Returns:
            The text feature matrix, moved to CPU and converted to NumPy.
        """
        inputs = self.processor(text=text, return_tensors="pt", padding=True)
        inputs = {k: v.to(self.device) for k, v in inputs.items()}

        with torch.no_grad():
            text_features = self.model.get_text_features(**inputs)
        return text_features.cpu().numpy()

    def _expand_words(self, words):
        """Expand tokenized query words via the semantic mapping.

        Adds the mapped class names for every known word, merges adjacent
        token pairs (e.g. "维生素" + "C") that form a known concept, and
        returns a de-duplicated list.
        """
        expanded = list(words)
        for word in words:
            if word in self.semantic_mapping:
                expanded.extend(self.semantic_mapping[word])

        # Merge adjacent token pairs that jieba split apart.
        combined = []
        i = 0
        while i < len(words):
            if i + 1 < len(words):
                merged = words[i] + words[i + 1]
                if merged in self.semantic_mapping:
                    combined.append(merged)
                    expanded.extend(self.semantic_mapping[merged])
                    i += 2
                    continue
            combined.append(words[i])
            i += 1
        expanded.extend(combined)

        # De-duplicate; downstream only does membership-style matching,
        # so the resulting order does not affect which images match.
        return list(set(expanded))

    def _collect_matches(self, expanded_words, cache_module, garbage_labels):
        """Scan all cached dataset images and return those whose category
        names contain any of the expanded query words."""
        matches = []
        for image_path, _image_features in cache_module.get_features().items():
            try:
                relative_path = os.path.relpath(image_path, self.dataset_path)
                class_info = garbage_labels.get_class_from_path(image_path, self.dataset_path)
                if not class_info:
                    continue

                main_class = class_info['main_category']
                detailed_class = class_info['class_name']

                # Substring match against either category level.
                if not any(word in main_class or word in detailed_class
                           for word in expanded_words):
                    continue

                # Build a URL-safe, forward-slash dataset-relative path.
                url_path = quote(relative_path.replace('\\', '/'))
                matches.append({
                    'type': 'image',
                    'path': f'/dataset/{url_path}',
                    'main_class': main_class,
                    'detailed_class': detailed_class,
                    'display_text': f"{main_class} - {detailed_class}",
                    'match_key': f"{main_class}_{detailed_class}"  # grouping key
                })
            except Exception as e:
                logger.error(f'处理图片路径出错 {image_path}: {str(e)}')
                continue
        return matches

    def _rank_matches(self, all_matches):
        """Order matches for diversity and cap at MAX_RESULTS.

        Round 1 picks one image per distinct main category, round 2 one per
        remaining (main, detailed) combination, and round 3 fills leftover
        slots with random not-yet-selected images. Unlike the original
        implementation, round 3 excludes images already chosen, so the
        result contains no duplicates.
        """
        groups = defaultdict(list)
        for match in all_matches:
            groups[match['match_key']].append(match)

        ranked = []
        used_main_classes = set()
        used_keys = set()

        # Round 1: one representative per distinct main category.
        for key, group in groups.items():
            main_class = group[0]['main_class']
            if main_class not in used_main_classes:
                used_main_classes.add(main_class)
                used_keys.add(key)
                ranked.append(random.choice(group))

        # Round 2: one representative per remaining detailed category.
        for key, group in groups.items():
            if key not in used_keys:
                used_keys.add(key)
                ranked.append(random.choice(group))

        # Round 3: fill remaining slots with unused images, at random.
        remaining_slots = self.MAX_RESULTS - len(ranked)
        if remaining_slots > 0:
            pool = [m for group in groups.values() for m in group
                    if m not in ranked]
            if pool:
                random.shuffle(pool)
                ranked.extend(pool[:remaining_slots])

        return ranked[:self.MAX_RESULTS]

    def search_by_text(self, query, cache_module, garbage_labels):
        """Search the dataset for images matching a free-text query.

        Args:
            query: User query string (Chinese).
            cache_module: Provides ``get_features()`` -> {image_path: features}.
            garbage_labels: Provides ``get_class_from_path(path, dataset_path)``.

        Returns:
            A list of up to MAX_RESULTS match dicts on success, or a dict
            with ``error`` and ``status`` keys when nothing matched or an
            unexpected error occurred.
        """
        try:
            query = query.strip()
            if query in self.semantic_mapping:
                # The full query is a known concept — skip segmentation.
                words = [query]
            else:
                words = list(jieba.cut(query))

            logger.info(f"搜索关键词: {words}")

            # NOTE: the original code computed CLIP features for the query
            # here but never used them; that dead inference was removed.

            expanded_words = self._expand_words(words)
            all_matches = self._collect_matches(expanded_words, cache_module,
                                                garbage_labels)

            if not all_matches:
                logger.warning(f"没有找到与'{query}'相关的图片")
                return {
                    "error": "没有找到相关图片，请换一种描述方式",
                    "status": "not_found"
                }

            results = self._rank_matches(all_matches)
            logger.info(f"共返回 {len(results)} 个结果")
            return results

        except Exception as e:
            logger.error(f'文字搜索出错: {str(e)}')
            return {
                "error": "搜索过程中出现错误，请稍后重试",
                "status": "error"
            }

    def find_similar_images(self, query_features, cache_module, garbage_labels, top_k=4, vector_db=None):
        """Find images similar to ``query_features`` via the vector database.

        Returns an empty list when no vector database was supplied.
        """
        if vector_db is None:
            logger.error("向量数据库未初始化")
            return []

        return vector_db.find_similar_in_dataset(query_features, top_k)