import pyodbc
import numpy as np
from sentence_transformers import SentenceTransformer
import faiss
import pickle
import os
from typing import List, Dict, Tuple, Optional
import requests
import json
import time
from nltk.corpus import wordnet
import nltk
from collections import defaultdict
import itertools
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
import gc
from dataclasses import dataclass
import logging
import torch
import re

# Configure module-wide logging.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Ensure the NLTK WordNet corpus is available (one-time download).
try:
    nltk.data.find('corpora/wordnet')
except LookupError:
    logger.info("下载WordNet数据...")
    nltk.download('wordnet', quiet=True)


@dataclass
class QueryResult:
    """Container for the full outcome of one product-matching query."""
    query: str                                    # original user query text
    retrieved_products: List[Tuple[Dict, float]]  # (product row, similarity score) pairs
    products_table: str                           # formatted text table of the matches
    generated_response: str                       # LLM analysis (or local fallback text)
    retrieval_time: float                         # seconds spent on retrieval
    generation_time: float                        # seconds spent generating the response
    total_time: float                             # retrieval_time + generation_time


class HighPerformanceSynonymExpander:
    """High-performance synonym expander tuned for ~100k-row datasets.

    Combines a curated customs-domain synonym table with on-demand WordNet
    lookups, fronted by an in-memory cache so repeated terms are free.
    """

    def __init__(self, cache_size: int = 5000):
        # Maps "word_maxsynonyms" -> cached synonym list.
        self.synonym_cache = defaultdict(list)
        self.cache_size = cache_size  # soft cap before _clean_cache() runs
        self._setup_domain_synonyms()
        self._preload_common_terms()

    def _setup_domain_synonyms(self):
        """Build the customs-domain synonym table.

        Keys are normalized to lower case because get_synonyms() lowercases
        the query word before the lookup -- the previous upper-case unit
        keys ('CTNS', 'KGS', 'PCS') could never match and were dead entries.
        """
        raw_synonyms = {
            # product categories
            'laptop': ['notebook', 'portable computer', 'ultrabook'],
            'computer': ['pc', 'desktop', 'workstation'],
            'phone': ['mobile', 'cellphone', 'smartphone'],
            'tablet': ['ipad', 'slate', 'pad'],
            'tv': ['television', 'display', 'monitor'],

            # toy categories
            'toy': ['plaything', 'educational toy', 'game'],
            'doll': ['action figure', 'fashion doll', 'puppet'],
            'car': ['toy vehicle', 'model car', 'automobile toy'],

            # material types
            'plastic': ['polymer', 'synthetic material', 'resin'],
            'metal': ['metallic', 'alloy', 'steel', 'aluminum'],
            'wood': ['wooden', 'timber', 'lumber'],
            'fabric': ['cloth', 'textile', 'material'],

            # electronic components
            'chip': ['semiconductor', 'microchip', 'ic'],
            'circuit': ['pcb', 'board', 'electronic circuit'],
            'battery': ['cell', 'power source', 'accumulator'],

            # units of measure (appear upper-case in the data, matched lower-case)
            'ctns': ['cartons', 'cases', 'boxes'],
            'kgs': ['kilograms', 'kilos'],
            'pcs': ['pieces', 'units', 'items'],
        }
        # Defensive normalization so future mixed-case additions still match.
        self.domain_synonyms = {key.lower(): value for key, value in raw_synonyms.items()}

    def _preload_common_terms(self):
        """Warm the cache with synonyms for frequently queried terms."""
        common_terms = ['plastic', 'metal', 'toy', 'electronic', 'fabric', 'wood', 'computer']
        for term in common_terms:
            self.get_synonyms(term, 2)  # populate the cache

    def get_synonyms(self, word: str, max_synonyms: int = 2) -> List[str]:
        """Return up to *max_synonyms* synonyms for *word* (cached).

        Lookup order: cache -> domain table -> WordNet. Words shorter than
        two characters are never expanded.
        """
        if not word or len(word) < 2:
            return []

        cache_key = f"{word}_{max_synonyms}"

        # Cache hit is the fastest path.
        if cache_key in self.synonym_cache:
            return self.synonym_cache[cache_key]

        synonyms = set()
        word_lower = word.lower()

        # 1. Domain table (cheapest source).
        if word_lower in self.domain_synonyms:
            synonyms.update(self.domain_synonyms[word_lower][:max_synonyms])

        # 2. WordNet, only if the domain table did not fill the quota.
        if len(synonyms) < max_synonyms:
            wordnet_syns = self._get_wordnet_synonyms_fast(word_lower, max_synonyms - len(synonyms))
            synonyms.update(wordnet_syns)

        result = list(synonyms)[:max_synonyms]
        self.synonym_cache[cache_key] = result

        # Keep the cache bounded.
        if len(self.synonym_cache) > self.cache_size:
            self._clean_cache()

        return result

    def _get_wordnet_synonyms_fast(self, word: str, max_synonyms: int) -> List[str]:
        """Query WordNet with tight limits to keep latency low."""
        synonyms = set()
        try:
            # Only the first 3 synsets / 2 lemmas each -- speed over recall.
            for syn in wordnet.synsets(word, lang='eng')[:3]:
                for lemma in syn.lemmas()[:2]:
                    if lemma.name().lower() != word:
                        synonym = lemma.name().replace('_', ' ').lower()
                        if 1 <= len(synonym.split()) <= 2:  # keep 1-2 word synonyms only
                            synonyms.add(synonym)
                    if len(synonyms) >= max_synonyms:
                        break
                if len(synonyms) >= max_synonyms:
                    break
        except Exception:
            pass  # deliberately best-effort; never break the main flow

        return list(synonyms)

    def _clean_cache(self):
        """Evict roughly 10% of cache entries once the soft cap is exceeded."""
        if len(self.synonym_cache) > self.cache_size:
            keys = list(self.synonym_cache.keys())
            remove_count = len(keys) // 10
            for key in keys[:remove_count]:
                del self.synonym_cache[key]

    def expand_query_batch(self, queries: List[str], max_expansions: int = 2) -> Dict[str, List[str]]:
        """Expand each query into [original, variant...] lists.

        Only queries of <= 3 words are expanded; each word contributes at
        most one synonym substitution, capped at *max_expansions* variants.
        """
        expanded_queries = {}

        for query in queries:
            words = query.lower().split()
            if len(words) == 0:
                expanded_queries[query] = [query]
                continue

            expansions = [query]  # the original query always comes first

            if len(words) <= 3:
                for i, word in enumerate(words):
                    synonyms = self.get_synonyms(word, 1)  # one synonym per word
                    for synonym in synonyms:
                        new_words = words.copy()
                        new_words[i] = synonym
                        new_query = ' '.join(new_words)
                        if new_query != query and new_query not in expansions:
                            expansions.append(new_query)
                            if len(expansions) >= max_expansions + 1:
                                break
                    if len(expansions) >= max_expansions + 1:
                        break

            expanded_queries[query] = expansions[:max_expansions + 1]

        return expanded_queries


class OptimizedVectorIndex:
    """FAISS-backed vector index over customs product rows.

    Rows are read from an Access database table (清关资料), rendered to a
    "label: value" text form, embedded with a SentenceTransformer model and
    stored in an inner-product FAISS index. Vectors are L2-normalized, so
    the reported score is cosine similarity.
    """

    def __init__(self, embedding_model, index_path: str = r"D:\programData"):
        # embedding_model must expose encode(texts, ...) -> np.ndarray.
        self.embedding_model = embedding_model
        self.index_path = index_path        # directory for the pickled index
        self.index = None                   # faiss.IndexFlatIP once built/loaded
        self.products = []                  # raw row dicts; position == FAISS id
        self.product_texts = []             # encoder input texts, same order
        self.index_lock = threading.Lock()  # serializes concurrent searches

    def build_or_load_index(self, db_path: str, force_rebuild: bool = False):
        """Load a previously saved index, or (re)build it from the database.

        NOTE(review): pickle.load executes arbitrary code from the file --
        only load index files produced by this process.
        """
        index_file = os.path.join(self.index_path, "qwen_vl_rag_index.pkl")

        if not force_rebuild and os.path.exists(index_file):
            logger.info("加载现有RAG索引...")
            with open(index_file, 'rb') as f:
                data = pickle.load(f)
            self.index = data['index']
            self.products = data['products']
            self.product_texts = data['product_texts']
            logger.info(f"索引加载完成: {len(self.products)} 个产品")
        else:
            logger.info("构建新的RAG索引...")
            self._build_index_from_db(db_path)

    def _build_index_from_db(self, db_path: str):
        """Read all rows from the Access DB and build the index.

        Access SQL has no OFFSET clause, so large tables are paged by
        primary-key batches via IN (...) queries instead.
        """
        connection_string = f'DRIVER={{Microsoft Access Driver (*.mdb, *.accdb)}};DBQ={db_path};'

        try:
            conn = pyodbc.connect(connection_string)
            cursor = conn.cursor()

            # Total row count drives the paging strategy below.
            cursor.execute("SELECT COUNT(*) FROM 清关资料")
            total_count = cursor.fetchone()[0]
            logger.info(f"数据库中共有 {total_count} 条记录")

            all_products = []

            if total_count <= 10000:
                # Small table: one full scan is cheapest.
                logger.info("数据量较小，一次性读取...")
                cursor.execute("SELECT * FROM 清关资料")
                columns = [column[0] for column in cursor.description]
                for row in cursor.fetchall():
                    all_products.append(dict(zip(columns, row)))
            else:
                # Large table: fetch all IDs once, then page by ID batches.
                batch_size = 1000
                total_batches = (total_count + batch_size - 1) // batch_size

                cursor.execute("SELECT ID FROM 清关资料 ORDER BY ID")
                all_ids = [row[0] for row in cursor.fetchall()]

                for batch_num in range(total_batches):
                    start_idx = batch_num * batch_size
                    end_idx = min((batch_num + 1) * batch_size, total_count)
                    batch_ids = all_ids[start_idx:end_idx]

                    if batch_ids:
                        # Parameterized IN (...) query for this ID batch.
                        id_placeholders = ','.join('?' * len(batch_ids))
                        query = f"SELECT * FROM 清关资料 WHERE ID IN ({id_placeholders})"

                        cursor.execute(query, batch_ids)
                        columns = [column[0] for column in cursor.description]
                        for row in cursor.fetchall():
                            all_products.append(dict(zip(columns, row)))

                        logger.info(
                            f"已加载批次 {batch_num + 1}/{total_batches}: {len(all_products)}/{total_count} 条记录")

                    # Periodic GC keeps peak memory down on big loads.
                    if batch_num % 10 == 0:
                        gc.collect()

            conn.close()

            self._create_index(all_products)

        except Exception as e:
            logger.error(f"数据库查询失败: {e}")
            # Last resort: try a plain full-table scan.
            self._fallback_build_index(db_path)

    def _fallback_build_index(self, db_path: str):
        """Fallback path: a single unpaged SELECT * over the table."""
        logger.info("尝试备用方案...")
        connection_string = f'DRIVER={{Microsoft Access Driver (*.mdb, *.accdb)}};DBQ={db_path};'

        try:
            conn = pyodbc.connect(connection_string)
            cursor = conn.cursor()

            cursor.execute("SELECT * FROM 清关资料")
            columns = [column[0] for column in cursor.description]

            all_products = []
            count = 0
            for row in cursor.fetchall():
                all_products.append(dict(zip(columns, row)))
                count += 1

                # Progress logging every 1000 rows.
                if count % 1000 == 0:
                    logger.info(f"已加载 {count} 条记录")

            conn.close()
            logger.info(f"备用方案成功加载 {len(all_products)} 条记录")

            self._create_index(all_products)

        except Exception as e:
            logger.error(f"备用方案也失败: {e}")
            raise

    def _create_index(self, products: List[Dict]):
        """Embed all product texts and build the FAISS index (memory-aware).

        Raises:
            ValueError: if *products* is empty (np.vstack([]) would
                otherwise fail with an opaque error).
        """
        if not products:
            raise ValueError("no product rows to index")

        logger.info("开始构建向量索引...")
        self.products = products
        self.product_texts = self._create_product_texts(products)

        # Encode in slices to bound peak memory.
        batch_size = 1000
        total_batches = (len(self.product_texts) - 1) // batch_size + 1
        all_embeddings = []

        for i in range(0, len(self.product_texts), batch_size):
            batch_texts = self.product_texts[i:i + batch_size]
            logger.info(f"生成嵌入向量批次 {i // batch_size + 1}/{total_batches}")

            batch_embeddings = self.embedding_model.encode(
                batch_texts,
                show_progress_bar=False,  # the bar costs noticeable time here
                batch_size=64,
                convert_to_numpy=True
            )
            all_embeddings.append(batch_embeddings)

            # Drop the reference before the next slice to limit peak RSS.
            del batch_embeddings
            gc.collect()

        embeddings = np.vstack(all_embeddings)
        logger.info(f"嵌入向量生成完成: {embeddings.shape}")

        # Inner product over L2-normalized vectors == cosine similarity.
        dimension = embeddings.shape[1]
        self.index = faiss.IndexFlatIP(dimension)
        faiss.normalize_L2(embeddings)
        self.index.add(embeddings.astype('float32'))

        self._save_index()

        logger.info(f"RAG索引构建完成: {len(self.products)} 个产品")

    def _create_product_texts(self, products: List[Dict]) -> List[str]:
        """Render each row to a "label: value; ..." text for embedding.

        All original columns are included; 柜号 is appended only when
        non-empty, and 文件路径 always comes last.
        """
        # Columns whose label equals the database column name, in order.
        fields = ['英文品名', '中文品名', '产品材质', '产品海关编码', '基础关税',
                  '附加关税', '件数CTNS', '产品申报数量', '净重KGS', '收货实重',
                  '方数', '产品申报单价', '总价']

        texts = []
        for product in products:
            parts = [f"{name}: {product.get(name, '')}" for name in fields]

            container_no = product.get('柜号', '')
            if container_no:  # optional column, skipped when empty
                parts.append(f"柜号: {container_no}")
            parts.append(f"文件路径: {product.get('文件路径', '')}")

            texts.append("; ".join(parts))

        return texts

    def _save_index(self):
        """Pickle the index plus row data to disk (creating the directory)."""
        os.makedirs(self.index_path, exist_ok=True)  # dir may not exist yet
        index_file = os.path.join(self.index_path, "qwen_vl_rag_index.pkl")

        with open(index_file, 'wb') as f:
            pickle.dump({
                'index': self.index,
                'products': self.products,
                'product_texts': self.product_texts
            }, f, protocol=pickle.HIGHEST_PROTOCOL)

        logger.info(f"索引已保存到: {index_file}")

    def search_batch(self, queries: List[str], top_k: int = 10) -> Dict[str, List[Tuple[Dict, float]]]:
        """Search all *queries* in one FAISS call.

        Returns, per query, up to *top_k* (product, score) pairs with
        duplicate products (same name + HS code) removed.

        Raises:
            RuntimeError: if the index has not been built or loaded yet.
        """
        if self.index is None:
            raise RuntimeError("index has not been built or loaded")

        with self.index_lock:
            results = {}

            # Encode all queries in a single batched call.
            query_embeddings = self.embedding_model.encode(queries, show_progress_bar=False, batch_size=32)
            faiss.normalize_L2(query_embeddings)

            # Over-fetch 2x so deduplication can still fill top_k.
            scores, indices = self.index.search(query_embeddings.astype('float32'), top_k * 2)

            for i, query in enumerate(queries):
                query_results = []
                seen_products = set()

                for score, idx in zip(scores[i], indices[i]):
                    # FAISS pads missing neighbours with idx == -1; the old
                    # `idx < len(...)` check let -1 through (products[-1]).
                    if 0 <= idx < len(self.products) and score > 0.15:  # similarity threshold
                        product = self.products[idx]
                        product_id = f"{product.get('英文品名', '')}_{product.get('产品海关编码', '')}"

                        if product_id not in seen_products:
                            seen_products.add(product_id)
                            query_results.append((product, float(score)))

                            if len(query_results) >= top_k:
                                break

                results[query] = query_results

            return results


class HighPerformanceQwenVLRAGSystem:
    """高性能RAG系统 - 针对10万级数据优化"""

    def __init__(self, db_path: str, qwen_api_url: str = "http://localhost:8000",
                 embedding_model: str = 'all-MiniLM-L6-v2', max_workers: int = 4):

        self.db_path = db_path
        self.qwen_api_url = qwen_api_url
        self.max_workers = max_workers

        # 初始化组件
        logger.info("初始化嵌入模型...")
        self.embedding_model = SentenceTransformer(embedding_model)

        logger.info("初始化同义词扩展器...")
        self.synonym_expander = HighPerformanceSynonymExpander()

        logger.info("初始化向量索引...")
        self.vector_index = OptimizedVectorIndex(self.embedding_model)
        self.vector_index.build_or_load_index(db_path)

        # 测试连接
        self.test_api_connection()

        # 线程池
        self.thread_pool = ThreadPoolExecutor(max_workers=max_workers)

    def test_api_connection(self):
        """测试API连接"""
        try:
            response = requests.get(f"{self.qwen_api_url}/health", timeout=10)
            if response.status_code == 200:
                logger.info("✅ API连接成功")
                return True
            else:
                logger.warning("❌ API连接失败")
                return False
        except Exception as e:
            logger.warning(f"❌ API连接异常: {e}")
            return False

    def query_products_parallel(self, product_names: List[str], top_k: int = 5) -> Dict[str, QueryResult]:
        """并行查询产品 - 高性能版本"""
        logger.info(f"开始并行处理 {len(product_names)} 个查询")
        start_time = time.time()

        # 批量扩展查询
        expanded_queries_map = self.synonym_expander.expand_query_batch(product_names)

        # 准备所有查询（包括原始查询和扩展查询）
        all_queries = []
        query_mapping = {}  # 映射：实际查询 -> 原始查询

        for original_query, expansions in expanded_queries_map.items():
            for query in expansions:
                all_queries.append(query)
                query_mapping[query] = original_query

        # 批量向量检索
        logger.info("执行批量向量检索...")
        batch_results = self.vector_index.search_batch(all_queries, top_k * 2)

        # 合并同义词查询结果
        merged_results = {}
        for original_query in product_names:
            expansions = expanded_queries_map[original_query]
            all_product_scores = {}

            for query in expansions:
                if query in batch_results:
                    for product, score in batch_results[query]:
                        product_id = f"{product.get('英文品名', '')}_{product.get('产品海关编码', '')}"
                        if product_id not in all_product_scores or score > all_product_scores[product_id][0]:
                            all_product_scores[product_id] = (score, product)

            # 排序并取前top_k
            sorted_products = sorted(all_product_scores.values(), key=lambda x: x[0], reverse=True)
            final_products = [(product, score) for score, product in sorted_products[:top_k]]

            merged_results[original_query] = final_products

        # 并行生成分析结果
        logger.info("并行生成分析结果...")
        futures = {}
        for query in product_names:
            future = self.thread_pool.submit(self._process_single_query, query, merged_results[query])
            futures[future] = query

        # 收集结果
        results = {}
        for future in as_completed(futures):
            query = futures[future]
            try:
                result = future.result(timeout=120)  # 2分钟超时
                results[query] = result
            except Exception as e:
                logger.error(f"查询处理失败 {query}: {e}")
                results[query] = self._create_error_result(query, str(e))

        total_time = time.time() - start_time
        logger.info(f"并行处理完成，总耗时: {total_time:.2f}s")

        return results

    def _process_single_query(self, query: str, similar_products: List[Tuple[Dict, float]]) -> QueryResult:
        """处理单个查询"""
        retrieval_time = 0  # 检索时间在外部计算
        start_time = time.time()

        if similar_products:
            products_table = self.format_products_table(similar_products)
            response = self.call_qwen_vl_with_text(query, products_table)
        else:
            products_table = ""
            response = f"❌ 未找到与 '{query}' 相似的商品。"

        generation_time = time.time() - start_time

        return QueryResult(
            query=query,
            retrieved_products=similar_products,
            products_table=products_table,
            generated_response=response,
            retrieval_time=retrieval_time,
            generation_time=generation_time,
            total_time=retrieval_time + generation_time
        )

    def _create_error_result(self, query: str, error_msg: str) -> QueryResult:
        """创建错误结果"""
        return QueryResult(
            query=query,
            retrieved_products=[],
            products_table="",
            generated_response=f"❌ 处理查询失败: {error_msg}",
            retrieval_time=0,
            generation_time=0,
            total_time=0
        )

    def format_products_table(self, products: List[Tuple[Dict, float]]) -> str:
        """将产品列表格式化为文本表格 - 恢复原始完整格式"""
        if not products:
            return "无匹配商品"

        table_lines = []
        table_lines.append("=" * 120)
        table_lines.append(
            f"{'序号':<4} {'英文品名':<30} {'中文品名':<30} {'产品材质':<30} {'产品海关编码':<30} {'基础关税':<8} {'附加关税':<8} {'件数CTNS':<8} {'产品申报数量':<8} {'净重KGS':<8} {'收货实重':<8} {'方数':<8} {'产品申报单价':<8} {'总价':<8} {'柜号':<30} {'文件路径':<30}")
        table_lines.append("-" * 120)

        for i, (product, score) in enumerate(products, 1):
            product_name = product.get('英文品名', '') or "N/A"
            cn_name = product.get('中文品名', '') or "N/A"
            material = product.get('产品材质', '') or "N/A"
            hs_code = product.get('产品海关编码', '') or "N/A"
            base_tax = product.get('基础关税', '') or "N/A"
            add_tax = product.get('附加关税', '') or "N/A"
            ctns = product.get('件数CTNS', '') or "N/A"
            product_count = product.get('产品申报数量', '') or "N/A"
            weight = product.get('净重KGS', '') or "N/A"
            actual_weight = product.get('收货实重', '') or "N/A"
            volume = product.get('方数', '') or "N/A"
            product_price = product.get('产品申报单价', '') or "N/A"
            total_value = product.get('总价', '') or "N/A"
            container_no = product.get('柜号', '') or "N/A"
            file_path = product.get('文件路径', '') or "N/A"
            table_lines.append(
                f"{i:<4} {product_name:<30} {cn_name:<30} {material:<30} {hs_code:<30} {base_tax:<8} {add_tax:<8} {ctns:<8} {product_count:<8} {weight:<8} {actual_weight:<8} {volume:<8} {product_price:<8} {total_value:<8} {container_no:<30} {file_path:<30}"
            )

        table_lines.append("=" * 120)
        return "\n".join(table_lines)

    def call_qwen_vl_with_text(self, query: str, products_table: str) -> str:
        """使用纯文本调用Qwen-VL进行分析 - 恢复原始提示词"""
        prompt = f"""
你是一个专业的海关商品分类专家。请根据以下商品匹配结果，为用户查询提供专业的匹配建议。

用户查询: "{query}"

匹配结果表格:
{products_table}

请基于以上匹配结果，完成以下分析任务：

1. **匹配度分析**
   - 分析查询商品与匹配商品的相似程度
   - 评估整体匹配质量（优秀/良好/一般/较差）

2. **推荐建议**
   - 推荐最匹配的前3个商品
   - 说明每个推荐商品的匹配理由
   - 商品用途必须保持一致
   - 对比关键属性（规格、材质、用途等）

请用专业、简洁、清晰的语言回答，结构清晰，重点突出；

请在最后一行以如下格式返回最匹配的商品信息: bestMatch: 英文品名，如果没有匹配到商品请返回：bestMatch: None 
"""
# 3. **专业建议**
#    - 海关分类建议（基于HS编码）
#    - 潜在的风险或注意事项
#    - 是否需要进一步信息确认
#
# 4. **总结**
#    - 给出最终的匹配结论
#    - 提供后续操作建议
        for attempt in range(3):
            try:
                result = self._call_chat_completions(prompt)
                return extract_best_match(result)
            except Exception as e:
                if attempt == 2:  # 最后一次尝试
                    logger.error(f"Qwen-VL分析失败: {e}")
                    return self._generate_fallback_analysis(query, products_table)
                else:
                    time.sleep(1)  # 重试前等待

    def _call_chat_completions(self, prompt: str) -> str:
        """调用chat/completions接口 - 恢复原始参数"""
        payload = {
            "model": "qwen2.5-vl-8b-instruct",
            "messages": [
                {
                    "role": "system",
                    "content": "你是一个专业的海关商品分类专家，擅长分析商品相似度和提供匹配建议。请用专业、清晰的语言回答。"
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            "max_tokens": 1000,
            "temperature": 0.3,
            "top_p": 0.9
        }

        response = requests.post(
            f"{self.qwen_api_url}/v1/chat/completions",
            json=payload,
            timeout=560
        )
        logger.info(f"response: {response.json()}")
        if response.status_code == 200:
            result = response.json()
            if 'choices' in result and len(result['choices']) > 0:
                return result['choices'][0]['message']['content']
            else:
                raise Exception("API返回格式异常")
        else:
            raise Exception(f"HTTP错误 {response.status_code}: {response.text}")

    def _generate_fallback_analysis(self, query: str, products_table: str) -> str:
        """生成备选分析报告 - 恢复原始格式"""
        return f"""
🔍 **商品匹配分析报告**

**查询商品:** {query}

**匹配结果概览:**
{products_table}

**简要分析:**
由于AI分析服务暂时不可用，以下是基于匹配结果的简要建议：

1. **查看相似度最高的商品** - 通常相似度 > 0.8 的商品是较好的匹配
2. **对比关键属性** - 重点关注规格、材质、用途的匹配程度
3. **检查HS编码** - 相同或相近的HS编码表示商品类别相似

**建议操作:**
- 如果找到高度匹配商品，可直接参考其海关分类
- 如匹配度一般，建议调整查询关键词或提供更多商品细节
- 可联系专业报关人员进行最终确认

*注: 此为基础分析，建议结合专业判断使用。*
"""

    def print_results_summary(self, results: Dict[str, QueryResult]):
        """打印结果摘要"""
        logger.info("\n" + "=" * 80)
        logger.info("查询结果摘要")
        logger.info("=" * 80)

        for query, result in results.items():
            logger.info(f"\n📦 查询: {query}")
            logger.info(f"⏱️  耗时: {result.total_time:.2f}s")
            logger.info(f"📊 匹配商品数: {len(result.retrieved_products)}")

            if result.retrieved_products:
                best_match = result.retrieved_products[0]
                logger.info(f"🏆 最佳匹配: {best_match[0].get('英文品名', 'N/A')} (相似度: {best_match[1]:.3f})")

            # 显示分析摘要
            response_preview = result.generated_response[:100] + "..." if len(
                result.generated_response) > 100 else result.generated_response
            logger.info(f"🤖 分析摘要: {response_preview}")

        logger.info("=" * 80)

    def print_detailed_results(self, results: Dict[str, QueryResult]):
        """打印详细结果 - 恢复原始格式"""
        for query, result in results.items():
            print(f"\n{'=' * 100}")
            print(f"📦 查询: {query}")
            print(f"{'=' * 100}")
            print(
                f"⏱️  总耗时: {result.total_time:.2f}s (检索: {result.retrieval_time:.2f}s, 生成: {result.generation_time:.2f}s)")

            print(f"\n📊 检索结果表格:")
            print(result.products_table)

            print(f"\n🤖 Qwen2.5-VL专业分析:")
            print(result.generated_response)

            print(f"\n{'=' * 100}")


# Batch-processing variant -- restores the original Batch class behaviour.
class BatchQwenVLRAG(HighPerformanceQwenVLRAGSystem):
    """RAG system variant that processes queries in fixed-size batches."""

    def batch_query_products(self, product_names: List[str], top_k: int = 5, batch_size: int = 5) -> Dict[
        str, QueryResult]:
        """Run query_products_parallel over *product_names*, *batch_size* at a time."""
        all_results = {}

        for i in range(0, len(product_names), batch_size):
            batch = product_names[i:i + batch_size]
            print(f"\n🔄 处理批次 {i // batch_size + 1}: {batch}")

            batch_results = self.query_products_parallel(batch, top_k)
            all_results.update(batch_results)

            # Optional per-batch persistence (disabled):
            # self._save_batch_results(batch_results, i // batch_size + 1)

            # Optional throttling between batches (disabled):
            # if i + batch_size < len(product_names):
            #     print("⏳ 批次间休息...")
            #     time.sleep(2)

        return all_results

    def _save_batch_results(self, results: Dict[str, QueryResult], batch_num: int):
        """Persist one batch of results as UTF-8 JSON (best effort)."""
        filename = f"batch_results_{batch_num}.json"
        try:
            # QueryResult is not JSON-serializable; flatten it by hand.
            serializable_results = {}
            for query, result in results.items():
                serializable_results[query] = {
                    'retrieved_products': [
                        {
                            'product': dict(product),
                            'score': score
                        }
                        for product, score in result.retrieved_products
                    ],
                    'products_table': result.products_table,
                    'generated_response': result.generated_response,
                    'retrieval_time': result.retrieval_time,
                    'generation_time': result.generation_time,
                    'total_time': result.total_time
                }

            with open(filename, 'w', encoding='utf-8') as f:
                # default=str: DB rows can carry datetime/Decimal values
                # that json cannot serialize natively.
                json.dump(serializable_results, f, ensure_ascii=False, indent=2, default=str)
            # Previously printed the literal "(unknown)" instead of the file name.
            print(f"💾 批次 {batch_num} 结果已保存到 {filename}")
        except Exception as e:
            print(f"❌ 保存批次结果失败: {e}")


def extract_best_match(text):
    """Extract the value of the ``bestMatch:`` marker from *text*.

    Args:
        text: the raw model output (may be empty or None).

    Returns:
        The matched product name with surrounding whitespace stripped
        (the model often emits trailing spaces/newlines), or the string
        "None" when the marker is absent -- mirroring the model's own
        "bestMatch: None" convention.
    """
    if not text:
        return "None"

    match = re.search(r'bestMatch:\s*(.+)', text)
    if match:
        # strip() drops the trailing whitespace/CR the old code returned.
        return match.group(1).strip()
    return "None"

# Usage example
def main():
    """Demo entry point: build the RAG system and analyze a few sample queries."""
    # Configuration (replace with your own database path / server address).
    db_path = "path/to/your/清关资料.accdb"
    qwen_api_url = "http://localhost:8000"

    print("🚀 初始化高性能Qwen2.5-VL-8B RAG系统...")

    try:
        rag_system = HighPerformanceQwenVLRAGSystem(
            db_path,
            qwen_api_url,
            embedding_model=r"D:\project\llt-model\all-MiniLM-L6-v2",
        )

        sample_queries = [
            "toy dog",
            "LED light bulb",
            "laptop computer",
            "plastic toy",
            "electric fan",
        ]

        print("\n🎯 开始商品匹配分析...")
        matched = rag_system.query_products_parallel(sample_queries, top_k=5)
        rag_system.print_detailed_results(matched)
    except Exception as e:
        print(f"❌ 系统初始化失败: {e}")


def batch_processing_example(queries=None):
    """Batch-processing example.

    Args:
        queries: list of query strings. Falls back to a small demo set
            when omitted -- the old code crashed with ``len(None)``.

    Returns:
        The per-query results dict, or None when processing failed.
    """
    if queries is None:
        # Sensible demo default instead of dereferencing None.
        queries = ["toy dog", "LED bulb", "laptop", "plastic toy"]

    db_path = r"D:\programData\access\test.accdb"
    qwen_api_url = "http://localhost:8000"

    try:
        rag_system = BatchQwenVLRAG(db_path, qwen_api_url,
                                    embedding_model=r"D:\project\llt-model\all-MiniLM-L6-v2")

        print(f"📋 开始批量处理 {len(queries)} 个查询...")
        results = rag_system.batch_query_products(queries, top_k=3, batch_size=3)

        print(f"\n✅ 批量处理完成，共处理 {len(results)} 个查询")
        return results

    except Exception as e:
        print(f"❌ 批量处理失败: {e}")
    return None
def torch_gc():
    """Release cached CUDA memory when a GPU is present; no-op otherwise."""
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()

if __name__ == "__main__":
    torch_gc()
    # Run-mode selection (interactive prompt currently disabled):
    # print("请选择运行模式:")
    # print("1. 单次查询示例")
    # print("2. 批量处理示例")

    # choice = input("请输入选择 (1-2): ").strip()
    # Read the query list from a file, or use the sample queries below.
    queries = [
        "toy dog", "LED bulb", "laptop", "plastic toy", "electric device",
        "children toy", "light fixture", "computer accessory", "household appliance"
    ]
    batch_processing_example(queries)

    # if choice == "2":
    #     batch_processing_example()
    # else:
    #     main()

    torch_gc()