class EnglishVectorSearch:
    """TF-IDF cosine-similarity search over English product names.

    Loads product rows from a Microsoft Access database (table 清关资料),
    fits a TF-IDF index over the '品名' (product name) column, and answers
    nearest-neighbour queries. The fitted index is cached to a pickle file
    so subsequent startups skip the rebuild.
    """

    # Single source of truth for the cache path (was duplicated in two methods).
    _CACHE_FILE = "english_product_vectors.pkl"

    def __init__(self, db_path: str):
        self.db_path = db_path
        self.connection_string = f'DRIVER={{Microsoft Access Driver (*.mdb, *.accdb)}};DBQ={db_path};'
        self.vectorizer = None        # fitted TfidfVectorizer, or None until built
        self.product_vectors = None   # sparse TF-IDF matrix, one row per product
        self.products = []            # raw DB rows as dicts

        self.load_or_build_vectors()

    def load_or_build_vectors(self):
        """Load the cached English vector index if present, else build it.

        Fix: a corrupted or incompatible cache file used to raise out of
        __init__; any unpickling error now falls back to a full rebuild.
        """
        if os.path.exists(self._CACHE_FILE):
            print("Loading cached English vector index...")
            try:
                # NOTE(review): pickle.load is acceptable only because this
                # cache is produced by this same process — never point it at
                # an untrusted file.
                with open(self._CACHE_FILE, 'rb') as f:
                    data = pickle.load(f)
                self.vectorizer = data['vectorizer']
                self.product_vectors = data['vectors']
                self.products = data['products']
                return
            except Exception as e:
                print(f"Cache load failed ({e}); rebuilding index...")

        print("Building English vector index...")
        self.build_english_vector_index()

    def build_english_vector_index(self):
        """Build the English TF-IDF vector index from the DB and cache it."""
        self.products = self.get_all_products()
        # `or ''` also covers rows where the name column exists but is NULL
        # (dict value None), which .get('品名', '') would pass through as None.
        product_names = [p.get('品名') or '' for p in self.products]

        if not product_names:
            # Fix: fit_transform on an empty corpus raises ValueError;
            # leave the index unbuilt so searches return [] gracefully.
            print("No products found; vector index not built")
            return

        # English-specific TF-IDF configuration.
        self.vectorizer = TfidfVectorizer(
            max_features=10000,
            # Fix: min_df=2 combined with max_df=0.8 rejects corpora of
            # fewer than 3 documents outright; relax for tiny corpora.
            min_df=2 if len(product_names) >= 3 else 1,
            max_df=0.8,
            stop_words='english',   # English stop-word list
            ngram_range=(1, 3),     # unigrams through trigrams
            lowercase=True,
            analyzer='word'
        )

        self.product_vectors = self.vectorizer.fit_transform(product_names)

        # Cache the fitted index for the next startup.
        with open(self._CACHE_FILE, 'wb') as f:
            pickle.dump({
                'vectorizer': self.vectorizer,
                'vectors': self.product_vectors,
                'products': self.products
            }, f)

        print(f"English vector index built: {len(self.products)} products")

    def search_similar_english(self, query: str, top_k: int = 10) -> List[Tuple[Dict, float]]:
        """Return up to `top_k` (product, score) pairs, best match first.

        Scores are cosine similarities in [0, 1]; results below the 0.1
        threshold are dropped. Returns [] when the index is not built.
        """
        if not self.vectorizer or self.product_vectors is None:
            return []

        query_vector = self.vectorizer.transform([query])
        similarities = cosine_similarity(query_vector, self.product_vectors).flatten()

        # Indices of the highest-scoring products, descending.
        top_indices = np.argsort(similarities)[::-1][:top_k]

        results = []
        for idx in top_indices:
            if similarities[idx] > 0.1:  # similarity threshold
                # float() so callers get a plain Python float (matches the
                # declared return type) rather than np.float64.
                results.append((self.products[idx], float(similarities[idx])))

        return results

    def get_all_products(self) -> List[Dict]:
        """Fetch every row of the 清关资料 table as a list of dicts.

        Returns [] on any database error (best-effort, as before).
        """
        try:
            conn = pyodbc.connect(self.connection_string)
            # Fix: the connection used to leak when execute/fetchall raised;
            # `finally` guarantees it is closed on every path.
            try:
                cursor = conn.cursor()
                cursor.execute("SELECT * FROM 清关资料")
                columns = [column[0] for column in cursor.description]
                return [dict(zip(columns, row)) for row in cursor.fetchall()]
            finally:
                conn.close()
        except Exception as e:
            print(f"Database query failed: {e}")
            return []