# src/sync_service.py
import logging

from reportagentic.core.db import DatabaseConnector, MetadataRepository
from reportagentic.core.llm import LLMClient
from reportagentic.core.vector import VectorClient

# Configure the root logger for the whole process.
# NOTE(review): module-level basicConfig affects every importer of this module;
# for library use, a module-level `logging.getLogger(__name__)` is preferred — confirm
# this file is only ever run as the service entry point.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class MetadataService:
    """
    Orchestrates the end-to-end metadata synchronization pipeline:

    1. Pull the raw schema from the source database.
    2. Enrich table-level metadata via the LLM and persist it.
    3. Enrich column-level metadata via the LLM and persist it.
    4. Embed the enriched column metadata and upsert the vectors
       into the vector store (one batched upsert per table).
    """

    def __init__(self, db_connector: DatabaseConnector,
                 llm_client: LLMClient,
                 meta_repo: MetadataRepository,
                 vector_client: VectorClient):
        self.db_connector = db_connector
        self.llm_client = llm_client
        self.meta_repo = meta_repo
        self.vector_client = vector_client

    def run_sync(self) -> None:
        """
        Run one full synchronization pass.

        Per-table/per-column enrichment or persistence failures are logged
        and skipped so one bad object does not abort the whole run; any
        unexpected exception is logged with its traceback. Database
        connections are always closed on exit.
        """
        logging.info("Starting metadata synchronization process...")
        try:
            # 1. Extract the raw schema from the source database.
            schema = self.db_connector.get_schema()
            logging.info(f"Found {len(schema)} tables to process.")

            for table_name, table_info in schema.items():
                logging.info(f"--- Processing table: {table_name} ---")
                self._process_table(table_name, table_info)

            logging.info("Metadata synchronization process completed successfully.")

        except Exception as e:
            # Top-level boundary: log with traceback rather than crashing the caller.
            logging.error(f"An error occurred during synchronization: {e}", exc_info=True)
        finally:
            # Ensure all database connections are closed.
            self.db_connector.close()
            self.meta_repo.close()

    def _process_table(self, table_name, table_info) -> None:
        """Enrich and persist one table and its columns, then embed/upsert the columns."""
        columns = table_info['columns']
        table_comment = table_info['table_comment']

        # 2. Enrich table metadata via the LLM.
        table_enrichment = self.llm_client.enrich_table_metadata(table_name, table_comment, columns)
        if not table_enrichment:
            logging.warning(f"Could not enrich metadata for table {table_name}. Skipping.")
            return

        table_data_to_save = {
            'table_name': table_name,
            'semantic_name': table_enrichment.get('semantic_name'),
            'description': table_enrichment.get('description')
        }

        # 3. Persist the enriched table metadata.
        table_id = self.meta_repo.save_table(table_data_to_save)
        if not table_id:
            logging.error(f"Failed to save table {table_name}. Skipping its columns.")
            return
        logging.info(f"Saved table '{table_name}' with ID {table_id}.")

        # Batch accumulators for one embed+upsert call per table.
        column_ids_for_vector = []
        texts_to_embed = []
        payloads_for_vector = []

        # 4. Enrich and persist each column, collecting embedding inputs.
        for column in columns:
            column_enrichment = self.llm_client.enrich_column_metadata(
                table_name,
                table_enrichment.get('description', ''),
                column  # pass the whole column dict through
            )

            if not column_enrichment:
                logging.warning(f"Could not enrich metadata for column {column['column_name']}. Skipping.")
                continue

            column_data_to_save = {
                'table_id': table_id,
                'column_name': column['column_name'],
                'data_type': column['data_type'],
                'is_primary_key': column['is_primary_key'],
                'semantic_name': column_enrichment.get('semantic_name'),
                'description': column_enrichment.get('description')
            }

            # 5. Persist the enriched column metadata.
            # BUGFIX: a falsy column_id used to be logged as success; now it is
            # checked, mirroring the save_table path.
            column_id = self.meta_repo.save_column(column_data_to_save)
            if not column_id:
                logging.error(f"Failed to save column {column['column_name']}. Skipping its vector.")
                continue
            logging.info(f"  - Saved column '{column['column_name']}' with ID {column_id}.")

            # Collect the embedding text and payload for the batched upsert.
            # BUGFIX: these lists were never populated in the original code, so
            # the `if texts_to_embed:` guard was always false and no vectors
            # were ever written to the vector store.
            # NOTE(review): the exact embedding-text format is an assumption —
            # confirm the retrieval side expects "name (semantic): description".
            texts_to_embed.append(
                f"{column['column_name']} ({column_data_to_save.get('semantic_name') or ''}): "
                f"{column_data_to_save.get('description') or ''}"
            )
            column_ids_for_vector.append(column_id)
            payloads_for_vector.append({
                'table_id': table_id,
                'table_name': table_name,
                'column_name': column['column_name'],
                'semantic_name': column_data_to_save.get('semantic_name'),
                'description': column_data_to_save.get('description'),
            })

        # 6. Generate embeddings and upsert into the vector store.
        # BUGFIX: moved OUT of the per-column loop so all columns of the table
        # are embedded in a single batched call.
        if texts_to_embed:
            vectors = self.llm_client.get_embeddings(texts_to_embed)
            if vectors:
                self.vector_client.upsert_vectors(
                    collection_name="column_metadata_vectors",
                    ids=column_ids_for_vector,
                    vectors=vectors,
                    payloads=payloads_for_vector
                )
