import sqlite3
import torch
import pickle
from bert_finetune.utils import DualRelationProcessor  # processor class produced by the earlier fine-tuning step

def add_and_fill_embedding_column(db_path, model_dir, batch_size=16):
    """Add an ``embedding`` column to the ``scenic_spots`` table and fill it
    with pickled BERT embeddings of each row's ``info`` text.

    Args:
        db_path: Path to the SQLite database file.
        model_dir: Directory holding the pretrained model loaded via
            ``DualRelationProcessor.from_pretrained``.
        batch_size: Rows encoded per transaction; lower it to reduce
            peak memory use.

    Raises:
        Exception: Any error during processing is re-raised after the
            current transaction is rolled back.
    """
    # 1. Load the fine-tuned model / processor.
    print("加载模型...")
    processor = DualRelationProcessor.from_pretrained(model_dir)

    # 2. Open the database connection.
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()

    try:
        # 3. Add the embedding column if it is missing (BLOB holds the
        # pickled tensor bytes).
        cursor.execute("PRAGMA table_info(scenic_spots)")
        columns = [col[1] for col in cursor.fetchall()]

        if 'embedding' not in columns:
            print("添加embedding列...")
            cursor.execute("ALTER TABLE scenic_spots ADD COLUMN embedding BLOB")
            conn.commit()
        else:
            print("embedding列已存在，将覆盖现有数据")

        # 4. Fetch id + description for every spot with a non-NULL info.
        print("读取景点数据...")
        cursor.execute("SELECT id, info FROM scenic_spots WHERE info IS NOT NULL")
        all_spots = cursor.fetchall()
        total = len(all_spots)
        print(f"共发现 {total} 个有效景点")

        # 5. Encode and store embeddings batch by batch, committing after
        # each batch so a crash loses at most one batch of work.
        print("开始生成并存储嵌入向量...")
        for i in range(0, total, batch_size):
            batch = all_spots[i:i + batch_size]
            spot_ids, infos = zip(*batch)

            embeddings = []
            for info in infos:
                # Reuse the attraction-embedding method on the description.
                emb = processor.get_attraction_embedding(info)
                # Detach before moving to CPU so no autograd-graph state is
                # pickled along with the tensor values.
                emb_tensor = emb.detach().cpu()
                embeddings.append(pickle.dumps(emb_tensor))

            # Parameterized batch update (SQL-injection safe).
            update_sql = """
                UPDATE scenic_spots 
                SET embedding = ? 
                WHERE id = ?
            """
            cursor.executemany(update_sql, zip(embeddings, spot_ids))
            conn.commit()

            # Progress report.
            processed = min(i + batch_size, total)
            print(f"已处理 {processed}/{total} 个景点 ({processed/total*100:.1f}%)")

        print("所有景点嵌入向量已生成并存储")

    except Exception as e:
        # Roll back the partial batch and re-raise: the original version
        # swallowed the error, letting the script exit "successfully"
        # even when processing failed.
        print(f"处理过程中出错: {str(e)}")
        conn.rollback()
        raise
    finally:
        # Always release the connection.
        conn.close()
        print("数据库连接已关闭")

if __name__ == "__main__":
    # Script entry point: embed every scenic spot in the Jiangsu database.
    database_file = "jiangsu.db"
    model_directory = "./bert_finetune/optimized_dual_relation_bert"  # replace with the actual model directory

    # batch_size may be tuned to the available memory.
    add_and_fill_embedding_column(database_file, model_directory, batch_size=16)
