import hashlib
import os
import uuid

import numpy as np
import pandas as pd
from faker import Faker
from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType, utility
from sentence_transformers import SentenceTransformer
from tqdm import tqdm

class AddressVectorSearch:
    """Vector similarity search over order delivery addresses.

    Addresses are embedded with the Chinese BGE sentence-transformer model
    ('BAAI/bge-base-zh-v1.5') and stored in a Milvus collection indexed for
    cosine similarity.
    """

    def __init__(self, collection_name='order_addresses'):
        """Initialize the embedding model, the Milvus connection and the collection.

        Args:
            collection_name: Name of the Milvus collection to (re)create.
        """
        self.collection_name = collection_name
        self.model = SentenceTransformer('BAAI/bge-base-zh-v1.5')
        self.vector_dim = 768  # output dimension of the BGE-base model
        self.faker = Faker(['zh_CN'])

        # Bug fix: Milvus Lite runs in-process and persists to a local file;
        # it takes a plain local ".db" path as the URI. The original
        # SQLAlchemy-style URI "sqlite:///:memory:" is not a valid Milvus
        # Lite URI and the connection fails.
        connections.connect(alias="default", uri="./milvus_address_search.db")

        # Make sure the CSV output directory used by callers exists.
        os.makedirs('csv_output', exist_ok=True)

        # Create (or recreate) the target collection.
        self._init_collection()

    def _init_collection(self):
        """Drop any existing collection, then create schema and vector index."""
        # Start from a clean slate so repeated runs don't accumulate data.
        if utility.has_collection(self.collection_name):
            utility.drop_collection(self.collection_name)

        # Field layout: primary key + payload fields + the embedding vector.
        fields = [
            FieldSchema(name="order_id", dtype=DataType.VARCHAR, max_length=36, is_primary=True),
            FieldSchema(name="user_id", dtype=DataType.VARCHAR, max_length=36),
            FieldSchema(name="address", dtype=DataType.VARCHAR, max_length=500),
            FieldSchema(name="address_vector", dtype=DataType.FLOAT_VECTOR, dim=self.vector_dim)
        ]

        schema = CollectionSchema(fields=fields, description="订单地址向量搜索")
        self.collection = Collection(name=self.collection_name, schema=schema)

        # IVF_FLAT with cosine similarity; nlist=128 is a reasonable default
        # for the small demo data set generated below.
        index_params = {
            "metric_type": "COSINE",
            "index_type": "IVF_FLAT",
            "params": {"nlist": 128}
        }
        self.collection.create_index(field_name="address_vector", index_params=index_params)

    def generate_order_addresses(self, num_orders=1000):
        """Generate synthetic order records with Chinese-style addresses.

        Args:
            num_orders: Number of fake orders to generate.

        Returns:
            A list of dicts with keys 'order_id', 'user_id' and 'address'.
        """
        orders = []

        # Province -> districts (municipalities) or province -> {city: districts}.
        provinces_cities = {
            '北京市': ['朝阳区', '海淀区', '丰台区', '西城区'],
            '上海市': ['浦东新区', '徐汇区', '长宁区', '静安区'],
            '广东省': {
                '广州市': ['天河区', '越秀区', '海珠区', '白云区'],
                '深圳市': ['南山区', '福田区', '罗湖区', '宝安区']
            }
        }

        for _ in tqdm(range(num_orders), desc="生成订单数据"):
            province = np.random.choice(list(provinces_cities.keys()))
            if isinstance(provinces_cities[province], list):
                # Municipality (北京/上海): the province IS the city.
                city = province
                district = np.random.choice(provinces_cities[province])
            else:
                # Regular province: pick a city, then one of its districts.
                city = np.random.choice(list(provinces_cities[province].keys()))
                district = np.random.choice(provinces_cities[province][city])

            street = f"{np.random.choice(['人民路', '中山路', '建设路'])}{np.random.randint(1, 999)}号"
            building = f"{np.random.choice(['金色', '银座', '环球'])}大厦{np.random.randint(1, 50)}号"

            # Avoid repeating the municipality name twice (e.g. 北京市北京市).
            address = f"{province}{city if city != province else ''}{district}{street}{building}"

            orders.append({
                'order_id': str(uuid.uuid4()),
                'user_id': str(uuid.uuid4()),
                'address': address
            })

        return orders

    def insert_orders(self, orders):
        """Embed the order addresses and insert all rows into Milvus.

        Args:
            orders: List of dicts as produced by generate_order_addresses().
        """
        order_ids = [order['order_id'] for order in orders]
        user_ids = [order['user_id'] for order in orders]
        addresses = [order['address'] for order in orders]

        print("向量化地址...")
        # BGE models recommend L2-normalized embeddings; with the COSINE
        # metric this does not change similarity scores.
        vectors = self.model.encode(addresses, normalize_embeddings=True)

        # Column-oriented insert, in schema field order (minus the PK auto part).
        entities = [
            order_ids,
            user_ids,
            addresses,
            vectors.tolist()
        ]

        self.collection.insert(entities)
        self.collection.flush()  # make the inserted rows durable/visible
        print(f"成功插入 {len(orders)} 条数据")

    def search_similar_addresses(self, query_address, similarity_threshold=0.85, limit=10):
        """Return orders whose address is similar to the query address.

        Args:
            query_address: Free-text address to search for.
            similarity_threshold: Minimum cosine similarity to keep a hit.
            limit: Maximum number of candidates requested from Milvus.

        Returns:
            A list of dicts with 'order_id', 'user_id', 'address', 'similarity'.
        """
        # Normalize the query the same way as the indexed vectors.
        query_vector = self.model.encode([query_address], normalize_embeddings=True)[0]

        # Loading is idempotent; ensures the collection is queryable.
        self.collection.load()
        search_params = {"metric_type": "COSINE", "params": {"nprobe": 10}}
        results = self.collection.search(
            data=[query_vector],
            anns_field="address_vector",
            param=search_params,
            limit=limit,
            output_fields=["order_id", "user_id", "address"]
        )

        # With the COSINE metric, hit.score is the similarity (higher = closer).
        similar_orders = []
        for hits in results:
            for hit in hits:
                if hit.score >= similarity_threshold:
                    similar_orders.append({
                        'order_id': hit.entity.get('order_id'),
                        'user_id': hit.entity.get('user_id'),
                        'address': hit.entity.get('address'),
                        'similarity': hit.score
                    })

        return similar_orders

def main():
    """Generate demo order data, index it, and run example similarity queries."""
    search_system = AddressVectorSearch()

    # Generate and index the synthetic data set.
    print("生成模拟订单数据...")
    orders = search_system.generate_order_addresses(num_orders=1000)
    search_system.insert_orders(orders)

    # Example queries to demonstrate the similarity search.
    test_addresses = [
        "北京市海淀区中关村大街1号科技大厦",
        "上海市浦东新区张江高科技园区88号",
        "广东省深圳市南山区科技园科发路8号"
    ]

    for query_address in test_addresses:
        print(f"\n查询地址: {query_address}")
        similar_orders = search_system.search_similar_addresses(
            query_address,
            similarity_threshold=0.85,
            limit=10
        )

        if similar_orders:
            df = pd.DataFrame(similar_orders)
            # Bug fix: builtin hash() is salted per process (PYTHONHASHSEED)
            # and can be negative, so the original file names were not
            # reproducible across runs. Use a stable content digest instead.
            digest = hashlib.md5(query_address.encode('utf-8')).hexdigest()[:12]
            filename = f'csv_output/similar_addresses_{digest}.csv'
            df.to_csv(filename, index=False, encoding='utf-8')
            # Bug fix: the original message printed the literal "(unknown)"
            # instead of the actual output path.
            print(f"找到 {len(similar_orders)} 个相似地址，已保存到 {filename}")
            print("\n相似地址前5个:")
            print(df.head().to_string())
        else:
            print("未找到相似度超过85%的地址")

if __name__ == "__main__":
    main()