import pyodbc
import requests
import json
from typing import List, Dict, Tuple
import time
import re
from collections import Counter
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import pickle
import os


class EnglishSimilaritySearch:
    """Two-stage similarity search over English product names.

    Products are loaded from an Access database table and indexed by
    keyword.  A query is answered by a fast keyword-overlap pre-filter,
    followed by an optional Qwen-API re-ranking step; when the API is
    unavailable, a local Jaccard/edit-distance similarity is used instead.
    """

    def __init__(self, db_path: str, qwen_api_url: str = None):
        """
        Args:
            db_path: Path to the Access (.mdb/.accdb) database file.
            qwen_api_url: Optional URL of a Qwen chat-completions endpoint
                used for precise re-ranking; if None, only the local
                fallback similarity is effectively available.
        """
        self.db_path = db_path
        self.qwen_api_url = qwen_api_url
        self.connection_string = f'DRIVER={{Microsoft Access Driver (*.mdb, *.accdb)}};DBQ={db_path};'

        # English stop words excluded from keyword extraction.
        self.stop_words = set([
            'a', 'an', 'the', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by',
            'as', 'is', 'are', 'was', 'were', 'be', 'been', 'have', 'has', 'had', 'do', 'does', 'did',
            'this', 'that', 'these', 'those', 'from', 'up', 'down', 'out', 'so', 'such', 'too', 'very'
        ])

        # Inverted index: keyword -> list of product row dicts containing it.
        self.product_index = {}
        self.load_product_index()

    def extract_english_keywords(self, text: str) -> List[str]:
        """Extract lowercase English keywords from *text*.

        A keyword is a run of at least 3 letters that is not a stop word;
        shorter tokens carry too little signal for product matching.
        """
        if not text:
            return []

        # Lowercase first, then match alphabetic runs of 3+ characters.
        # (The original 2+ regex plus a len >= 3 filter is equivalent; the
        # digit check was dead code since the pattern matches letters only.)
        words = re.findall(r'\b[a-z]{3,}\b', text.lower())
        return [word for word in words if word not in self.stop_words]

    def load_product_index(self):
        """Build the in-memory keyword -> products inverted index."""
        print("正在加载英文产品索引...")
        all_products = self.get_all_products()

        for product in all_products:
            product_name = product.get('品名', '')
            if not product_name:
                continue
            for keyword in self.extract_english_keywords(product_name):
                self.product_index.setdefault(keyword, []).append(product)

        print(f"英文索引构建完成，共 {len(all_products)} 个产品，{len(self.product_index)} 个关键词")

    def get_all_products(self) -> List[Dict]:
        """Load every row of the 清关资料 table as a list of dicts.

        Returns:
            All rows as column-name -> value dicts; an empty list on any
            database error (best-effort, error is printed).
        """
        try:
            conn = pyodbc.connect(self.connection_string)
            try:
                cursor = conn.cursor()
                cursor.execute("SELECT * FROM 清关资料")
                columns = [column[0] for column in cursor.description]
                return [dict(zip(columns, row)) for row in cursor.fetchall()]
            finally:
                # Close the connection even if the query or fetch raises,
                # so a failed query cannot leak the ODBC connection.
                conn.close()
        except Exception as e:
            print(f"数据库查询失败: {e}")
            return []

    def fast_english_prefilter(self, query: str, top_n: int = 100) -> List[Dict]:
        """Keyword-overlap pre-filter returning up to *top_n* candidates.

        Each indexed product is scored by the number of query keywords it
        shares plus a doubled text-similarity bonus; results are
        deduplicated by product name, best score kept, and sorted
        descending.
        """
        query_keywords = self.extract_english_keywords(query)
        if not query_keywords:
            return []

        # id(product) -> [product, keyword-hit count].  Keeping the product
        # object next to its count avoids the previous full re-scan of the
        # whole index just to map ids back to products (which was
        # O(hits x index size) and produced duplicate entries).
        hits = {}
        for keyword in query_keywords:
            for product in self.product_index.get(keyword, []):
                entry = hits.get(id(product))
                if entry is None:
                    hits[id(product)] = [product, 1]
                else:
                    entry[1] += 1

        # Deduplicate by product name, keeping the highest combined score.
        best_by_name = {}
        for product, hit_count in hits.values():
            product_name = product.get('品名', '')
            text_similarity = self.english_text_similarity(query, product_name)
            final_score = hit_count + text_similarity * 2  # weight text similarity higher
            previous = best_by_name.get(product_name)
            if previous is None or final_score > previous[1]:
                best_by_name[product_name] = (product, final_score)

        ranked = sorted(best_by_name.values(), key=lambda x: x[1], reverse=True)
        return [product for product, _ in ranked[:top_n]]

    def english_text_similarity(self, str1: str, str2: str) -> float:
        """Similarity of two English strings in [0, 1].

        Average of keyword-set Jaccard similarity and character-level
        SequenceMatcher ratio; 1.0 for identical (case/space-insensitive)
        strings, 0.0 when either side yields no keywords.
        """
        str1 = str1.lower().strip()
        str2 = str2.lower().strip()

        if str1 == str2:
            return 1.0

        words1 = set(self.extract_english_keywords(str1))
        words2 = set(self.extract_english_keywords(str2))

        if not words1 or not words2:
            return 0.0

        # Jaccard similarity over keyword sets.
        intersection = len(words1.intersection(words2))
        union = len(words1.union(words2))
        jaccard_sim = intersection / union if union > 0 else 0

        # Character-level edit similarity.
        from difflib import SequenceMatcher
        edit_sim = SequenceMatcher(None, str1, str2).ratio()

        # Equal-weight blend of the two signals.
        return (jaccard_sim + edit_sim) / 2

    def qwen_similarity_batch(self, query: str, candidates: List[Dict]) -> List[Tuple[Dict, float]]:
        """Re-rank *candidates* against *query* via one batched Qwen call.

        Falls back to the local text similarity on any API/parse failure.
        Returns (product, score) pairs sorted by score descending.
        """
        if not candidates:
            return []

        # Build a single English prompt listing all candidates.
        candidate_texts = [candidate.get('品名', '') for candidate in candidates]

        prompt = f"""
        Please evaluate the similarity between the query product and candidate products.

        Query product: "{query}"

        Candidate products:
        """

        for i, candidate in enumerate(candidate_texts):
            prompt += f"{i + 1}. {candidate}\n"

        prompt += """
        Please provide a similarity score between 0-1 for each candidate product, considering:
        1. Product category similarity
        2. Function and usage similarity  
        3. Keyword overlap
        4. Likely HS code classification

        Return in the format:
        1:0.85
        2:0.72
        3:0.93
        ...
        Only return the number mappings, no other text.
        """

        try:
            response = self.call_qwen_api(prompt)
            return self.parse_batch_response(candidates, response)
        except Exception as e:
            print(f"批量相似度计算失败: {e}")
            return self.fallback_english_similarity(query, candidates)

    def call_qwen_api(self, prompt: str) -> str:
        """POST *prompt* to the configured Qwen endpoint, return the reply text.

        Raises on HTTP errors or unexpected response shape; the caller
        (qwen_similarity_batch) catches and falls back locally.
        """
        # Adapt payload to match your Qwen deployment.
        payload = {
            "model": "qwen2-vl-8b",
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": 500,
            "temperature": 0.1
        }

        response = requests.post(self.qwen_api_url, json=payload, timeout=30)
        # Fail fast on HTTP 4xx/5xx instead of raising a confusing KeyError
        # while indexing an error body below.
        response.raise_for_status()
        return response.json()['choices'][0]['message']['content']

    def parse_batch_response(self, candidates: List[Dict], response: str) -> List[Tuple[Dict, float]]:
        """Parse "index:score" lines from the model reply.

        Malformed lines and out-of-range indices are skipped.  Returns
        (candidate, score) pairs sorted by score descending.
        """
        results = []
        for line in response.strip().split('\n'):
            if ':' not in line:
                continue
            idx_str, score_str = line.split(':', 1)
            try:
                idx = int(idx_str.strip()) - 1  # model uses 1-based numbering
                score = float(score_str.strip())
            except ValueError:
                continue
            if 0 <= idx < len(candidates):
                results.append((candidates[idx], score))

        return sorted(results, key=lambda x: x[1], reverse=True)

    def fallback_english_similarity(self, query: str, candidates: List[Dict]) -> List[Tuple[Dict, float]]:
        """Local (no-API) ranking of *candidates* by text similarity."""
        results = [
            (candidate, self.english_text_similarity(query, candidate.get('品名', '')))
            for candidate in candidates
        ]
        return sorted(results, key=lambda x: x[1], reverse=True)

    def search_most_similar(self, product_names: List[str], top_k: int = 5) -> Dict[str, List[Tuple[Dict, float]]]:
        """Search the most similar products for each query name.

        Args:
            product_names: Query product names (English).
            top_k: Number of (product, score) results kept per query.

        Returns:
            Mapping of each query to its top_k (product, score) pairs.
        """
        results = {}

        for query in product_names:
            print(f"Processing query: {query}")

            # 1. Cheap keyword pre-filter to narrow the candidate set.
            print("  Step 1: Fast pre-filtering...")
            candidates = self.fast_english_prefilter(query, top_n=50)
            print(f"  Pre-filtered {len(candidates)} candidates")

            if not candidates:
                results[query] = []
                continue

            # 2. Precise re-ranking (Qwen API, with local fallback).
            print("  Step 2: Qwen precise ranking...")
            scored_products = self.qwen_similarity_batch(query, candidates)

            # 3. Keep the top_k best matches.
            results[query] = scored_products[:top_k]

            # Brief pause between queries to avoid hammering the API.
            time.sleep(1)

        return results