# backup_infinity.py (final fixed version)

import os
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
from datetime import datetime
import infinity
from infinity.common import NetworkAddress
from tqdm import tqdm
import json

# ==================== Configuration ====================
HOST = "rf-dify.docker.internal"  # Infinity server host (Docker-internal DNS name)
PORT = 23817                      # Infinity SDK network port
DB_NAME = "default_db"            # database to back up
PAGE_SIZE = 1000                  # rows fetched per paged query during export
VECTOR_DIM = 1024                 # target embedding length; vectors are padded/truncated to this

# Backup output directory: ./backup/infinity_<timestamp>, next to this script.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BACKUP_DIR = os.path.join(
    SCRIPT_DIR,
    "backup",
    f"infinity_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
)
os.makedirs(BACKUP_DIR, exist_ok=True)
# ==================== 工具函数 ====================
def safe_string(val):
    """Convert an arbitrary value to a string, mapping missing values to None.

    None and float NaN are treated as missing and return None; any other
    value is converted with str(). Conversion failures also yield None so
    the export never crashes on a single bad cell.
    """
    if val is None:
        return None
    if isinstance(val, float) and np.isnan(val):
        return None
    try:
        return str(val)
    # Fix: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # narrow it to ordinary exceptions.
    except Exception:
        return None

def standardize_vector(v, dim=1024, fill_value=0.0):
    """Normalize any vector representation to a Python list[float] of length *dim*.

    Accepts None/NaN, a "[1.0, 2.0]"-style string, a list, a numpy array,
    or any iterable of numbers. Unparseable input falls back to a vector of
    *fill_value*. Shorter results are right-padded with *fill_value*;
    longer ones are truncated to *dim*.
    """
    try:
        if v is None or (isinstance(v, float) and np.isnan(v)):
            return [fill_value] * dim
        if isinstance(v, str):
            # Strings look like "[1.0, 2.0, ...]"; strip brackets and split.
            v = v.strip("[]")
            if not v:
                return [fill_value] * dim
            v = [float(x.strip()) for x in v.split(",") if x.strip()]
        elif isinstance(v, list):
            v = [float(x) for x in v]
        elif isinstance(v, np.ndarray):
            v = v.astype(float).tolist()
        else:
            # Last resort: treat it as a generic iterable of numbers.
            try:
                v = list(map(float, v))
            # Fix: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
            # only conversion/iteration errors should fall back.
            except (TypeError, ValueError):
                return [fill_value] * dim

        # Pad or truncate to exactly `dim` entries.
        if len(v) < dim:
            v += [fill_value] * (dim - len(v))
        else:
            v = v[:dim]
        return v
    except Exception:
        return [fill_value] * dim

# ==================== 主逻辑 ====================
# ==================== Main logic ====================
# Connects to Infinity, pages every table out to a Parquet file under
# BACKUP_DIR, and writes a schema.json describing all exported tables.
try:
    inf = infinity.connect(NetworkAddress(HOST, PORT))
    try:
        db = inf.get_database(DB_NAME)

        res = db.list_tables()
        # The SDK renamed this attribute between versions; support both.
        tables = res.remote_table_names if hasattr(res, 'remote_table_names') else res.table_names
        print(f"🔍 找到 {len(tables)} 个表: {tables}")

        schema_data = {}

        for table_name in tables:
            writer = None
            try:
                table = db.get_table(table_name)

                # Total row count via a 1-row probe with total_hits_count enabled.
                _, extra = table.output(["*"]).limit(1).option({"total_hits_count": True}).to_df()
                total_rows = extra.get("total_hits_count", 0)
                print(f"📊 表 '{table_name}' 共 {total_rows} 行")

                if total_rows == 0:
                    continue

                # Column names come from the table's schema listing.
                schema_df = table.show_columns().to_pandas()
                col_name_key = 'name' if 'name' in schema_df.columns else schema_df.columns[0]
                all_columns = schema_df[col_name_key].tolist()

                print(f"✅ 导出列: {all_columns[:5]}... (共 {len(all_columns)} 列)")
                # Heuristic: any column whose name contains "vec" is a vector column.
                vector_cols = [c for c in all_columns if 'vec' in c.lower()]
                print(f"📌 向量列: {vector_cols}")

                output_path = os.path.join(BACKUP_DIR, f"{table_name}.parquet")

                # Arrow schema: vectors as list<float32>, all other columns as string.
                arrow_fields = []
                for col in all_columns:
                    if col in vector_cols:
                        arrow_fields.append(pa.field(col, pa.list_(pa.float32())))
                    else:
                        arrow_fields.append(pa.field(col, pa.string()))
                arrow_schema = pa.schema(arrow_fields)

                with tqdm(total=total_rows, desc=f"📦 备份 {table_name}", unit="行") as pbar:
                    offset = 0
                    while offset < total_rows:
                        result_data, _ = table.output(all_columns).limit(PAGE_SIZE).offset(offset).to_df()

                        try:
                            # Normalize result_data into a list of dict records,
                            # whichever client-side type the driver returned.
                            if hasattr(result_data, 'to_dicts'):
                                rows = result_data.to_dicts()
                            elif hasattr(result_data, 'to_dict'):
                                rows = result_data.to_dict('records')
                            elif isinstance(result_data, list):
                                rows = result_data
                            elif hasattr(result_data, 'rows') and hasattr(result_data, 'columns'):
                                rows = [dict(zip(result_data.columns, row)) for row in result_data.rows()]
                            else:
                                print(f"❌ 无法处理数据类型: {type(result_data)}")
                                break

                            if not rows:
                                break

                            # Sanitize every row: vectors become fixed-length float
                            # lists, everything else a string (or None).
                            cleaned_rows = []
                            for row in rows:
                                cleaned_row = {}
                                for col in all_columns:
                                    val = row.get(col)
                                    if col in vector_cols:
                                        cleaned_row[col] = standardize_vector(val, dim=VECTOR_DIM)
                                    else:
                                        cleaned_row[col] = safe_string(val)
                                cleaned_rows.append(cleaned_row)

                            batch_table = pa.Table.from_pylist(cleaned_rows, schema=arrow_schema)

                            # Lazily open the Parquet writer on the first batch.
                            if writer is None:
                                writer = pq.ParquetWriter(output_path, batch_table.schema)
                            writer.write_table(batch_table)
                            pbar.update(len(rows))

                        except Exception as e:
                            print(f"❌ 处理批次失败: {str(e)}")
                            break

                        offset += len(rows)

                if writer:
                    writer.close()
                    writer = None  # mark closed so the finally below skips it
                print(f"✅ 成功备份: {table_name} -> {output_path}")

                # Record the table schema so the backup can be restored later.
                schema_records = []
                for _, row in schema_df.iterrows():
                    schema_records.append({
                        "name": row[col_name_key],
                        "type": str(row.get('type', 'Unknown')),
                        "default": row.get("default", None),
                        "comment": row.get("comment", None),
                    })
                schema_data[table_name] = {"fields": schema_records}

            except Exception as e:
                print(f"❌ 处理表 {table_name} 失败: {str(e)}")
            finally:
                # Fix: close a dangling writer if the table failed mid-export;
                # previously the file handle leaked and the parquet footer
                # was never written.
                if writer is not None:
                    writer.close()

        # Persist schema.json alongside the parquet files.
        schema_path = os.path.join(BACKUP_DIR, "schema.json")
        with open(schema_path, "w", encoding="utf-8") as f:
            json.dump(schema_data, f, indent=2, ensure_ascii=False)
        print(f"📌 Schema 已保存: {schema_path}")
    finally:
        # Fix: always release the connection, even when the backup fails;
        # previously disconnect() was skipped on any exception.
        inf.disconnect()

    print(f"🎉 备份完成: {BACKUP_DIR}")

except Exception as e:
    print(f"❌ 备份失败: {str(e)}")