# -*- coding: utf-8 -*-
"""
导入resources/files目录下所有包含{hash}_results.json文件的文件夹数据
将数据保存到MySQL，图片保存到MySQL，数据刷新保存到ES

用法示例：
  python scripts/import_results_json.py \
    --root "resources/files"
"""
import argparse
import json
import os
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import List, Dict, Optional, Tuple
from urllib.parse import unquote
from uuid import uuid4

# 兼容直接运行
PROJECT_ROOT = Path(__file__).resolve().parents[1]
if str(PROJECT_ROOT) not in sys.path:
    sys.path.append(str(PROJECT_ROOT))

from loguru import logger
from langchain_core.documents import Document

from configs.kb_config import ES_KB_INDEX_NAME, ES_KB_EMERGENCY_NAME, ES_URL, ES_USER, ES_PASSWORD
from db.repository.qa_file import (
    db_get_qa_file_by_name,
    db_add_qa_file,
    db_update_qa_file_scope_gb,
    db_update_qa_file_attributes,
    db_add_qa_attach,
    db_list_qa_attaches_by_file_id,
)
from utils.file_utils import to_rel_path, to_abs_path, download_attachment
from utils.documents_utils import create_documents_from_pdf
from service.es_service import add_documents_to_es, kb_vector_store, kb_vector_store_emergency
from service.embeddings import embeddings
from elasticsearch.helpers import bulk


def find_results_json_folders(root: Path) -> List[Tuple[Path, Path]]:
    """
    Locate every folder under *root* that contains a ``{hash}_results.json`` file.

    Returns:
        A list of ``(folder_path, json_file_path)`` tuples, one per match.
    """
    return [
        (candidate.parent, candidate)
        for candidate in root.rglob('*_results.json')
        if candidate.is_file()
    ]


def load_results_json(json_path: Path) -> Optional[Dict]:
    """Load and parse a results.json file; return None (and log) on any failure."""
    try:
        with json_path.open('r', encoding='utf-8') as fh:
            return json.load(fh)
    except Exception as e:
        logger.error(f"Failed to load JSON file {json_path}: {e}")
        return None


def find_pdf_file(folder_path: Path, hash_name: str) -> Optional[Path]:
    """
    Find the PDF for this folder: prefer ``{hash_name}.pdf``, otherwise fall
    back to the first PDF found in the folder. Returns None if no PDF exists.
    """
    preferred = folder_path / f"{hash_name}.pdf"
    if preferred.exists():
        return preferred
    # No exact match — take whatever PDF the folder contains, if any.
    return next(iter(folder_path.glob("*.pdf")), None)


def find_image_files(folder_path: Path) -> List[Path]:
    """Return every image file (matched by extension) directly inside *folder_path*."""
    allowed_suffixes = {'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp'}
    return [
        entry
        for entry in folder_path.iterdir()
        if entry.is_file() and entry.suffix.lower() in allowed_suffixes
    ]


def batch_add_documents_to_es(vector_store, documents: List[Document], index_name: str, batch_size: int = 100) -> List[str]:
    """
    Bulk-index documents into Elasticsearch in batches for better throughput.

    Each batch is embedded in a single call (falling back to per-document
    embedding on failure), then indexed with the ES bulk helper. If an entire
    batch fails, that batch is retried one document at a time through the
    vector store.

    Args:
        vector_store: ElasticsearchStore instance (must expose ``.client``
            and ``.add_documents``).
        documents: Documents to index.
        index_name: Target ES index name.
        batch_size: Number of documents per bulk request (default 100).

    Returns:
        IDs of the documents actually submitted for indexing.
    """
    if not documents:
        return []

    insert_ids: List[str] = []

    # Process documents batch by batch
    total_batches = (len(documents) + batch_size - 1) // batch_size

    for i in range(0, len(documents), batch_size):
        batch_docs = documents[i:i + batch_size]
        batch_num = i // batch_size + 1

        logger.info(f"ES batch {batch_num}/{total_batches}: Processing {len(batch_docs)} documents...")

        try:
            texts = [doc.page_content for doc in batch_docs]

            # Embed the whole batch in one call
            try:
                vectors = embeddings.embed_documents(texts)
                logger.debug(f"Batch {batch_num}: Generated {len(vectors)} vectors")
            except Exception as e:
                logger.error(f"Failed to embed documents batch {batch_num}: {e}")
                # Batch embedding failed — fall back to per-document embedding.
                logger.info("Falling back to single document embedding...")
                # BUG FIX: keep `vectors` aligned with `batch_docs` by inserting
                # a None placeholder when a single embed fails; previously the
                # zip() below silently paired documents with the wrong vectors.
                vectors = []
                for doc in batch_docs:
                    try:
                        vectors.append(embeddings.embed_query(doc.page_content))
                    except Exception as e2:
                        logger.error(f"Failed to embed single document: {e2}")
                        vectors.append(None)

            # Prepare the bulk-insert actions
            actions = []
            batch_ids = []
            for doc, vector in zip(batch_docs, vectors):
                if vector is None:
                    # Skip documents we could not embed.
                    continue
                doc_id = str(uuid4())
                batch_ids.append(doc_id)
                actions.append({
                    "_index": index_name,
                    "_id": doc_id,
                    "_source": {
                        "text": doc.page_content,
                        "metadata": doc.metadata,
                        "vector": vector,
                    },
                })

            # Insert with the bulk API
            success_count, failed_items = bulk(
                vector_store.client,
                actions,
                chunk_size=batch_size,
                request_timeout=120  # generous timeout for large batches
            )

            if failed_items:
                logger.warning(f"Batch {batch_num}: {len(failed_items)} documents failed to index")
                for item in failed_items[:5]:  # only show the first 5 errors
                    logger.error(f"Failed item: {item}")

            # BUG FIX: record IDs only after the bulk call returned, so a batch
            # that raises does not leave phantom IDs in the result (it is
            # re-inserted below under fresh IDs).
            insert_ids.extend(batch_ids)

            logger.info(f"Batch {batch_num}/{total_batches}: Successfully indexed {success_count} documents")

        except Exception as e:
            logger.error(f"Failed to process batch {batch_num}: {e}")
            import traceback
            logger.error(traceback.format_exc())
            # Bulk insert failed — fall back to single-document insertion
            logger.info(f"Falling back to single document insertion for batch {batch_num}...")
            for doc in batch_docs:
                try:
                    doc_id = str(uuid4())
                    vector_store.add_documents(documents=[doc], ids=[doc_id])
                    # Count the ID only after a successful insert.
                    insert_ids.append(doc_id)
                except Exception as e2:
                    logger.error(f"Failed to insert single document: {e2}")

    logger.info(f"Completed ES indexing: {len(insert_ids)} documents total")
    return insert_ids


def process_attachments_from_results(results_data: Dict, file_obj, folder_path: Path, max_download_retries: int = 3, max_workers: int = 10) -> List[Dict]:
    """
    Process image attachments listed in results.json (with parallel downloads).

    Processing logic:
    1. Prefer an existing DB record: if one exists and its local file is still
       present on disk, use it directly.
    2. If the DB has no usable record, look for the file in *folder_path*.
    3. If a local file is found, record it in the DB (no download needed).
    4. Otherwise download the image in parallel and record it in the DB.

    Args:
        results_data: Parsed contents of the results.json file.
        file_obj: File record the attachments belong to (may be falsy, in
            which case no DB lookups/inserts are attempted).
        folder_path: Folder searched for already-downloaded images; also the
            download destination.
        max_download_retries: Maximum retries per failed download.
        max_workers: Maximum number of parallel download threads.

    Returns:
        Processed attachment list: dict copies of the JSON entries, augmented
        with "id" and "local_save_path" where the image could be resolved.
    """
    processed_attachments = []
    download_tasks = []  # images that still need to be downloaded

    # Attachments declared in the JSON payload
    attachments = results_data.get("attachments", []) or []

    # Index attachment records already stored in the DB for this file,
    # keyed both by download link and by original file name
    existing_by_link = {}
    existing_by_name = {}
    if file_obj:
        try:
            db_atts = db_list_qa_attaches_by_file_id(file_obj.id)
            for a in db_atts:
                if getattr(a, 'download_link', None):
                    existing_by_link[a.download_link] = a
                if getattr(a, 'origin_name', None):
                    existing_by_name[a.origin_name] = a
        except Exception as e:
            logger.warning(f"Failed to get existing attachments from DB: {e}")

    # Map image file names found in the folder to their paths
    image_files = find_image_files(folder_path)
    image_files_dict = {img.name: img for img in image_files}

    # Pass 1: resolve each attachment, queueing the ones that need downloading
    for att in attachments:
        att_type = att.get("type", "")
        
        # Non-image attachments (e.g. tables) pass through untouched
        if att_type not in ("image", "image_in_table"):
            processed_attachments.append(dict(att))
            continue
        
        # Image URL and original file name (falls back to the attachment id)
        image_url = att.get("image_url", "")
        origin_name = os.path.basename(image_url) if image_url else att.get("id", "")
        
        if not image_url:
            logger.warning(f"Attachment has no image_url, skipping: {att.get('id', 'unknown')}")
            processed_attachments.append(dict(att))
            continue
        
        new_att = dict(att)
        
        # ========== Check for an existing DB record ==========
        db_hit = None
        if image_url in existing_by_link:
            db_hit = existing_by_link[image_url]
        elif origin_name in existing_by_name:
            db_hit = existing_by_name[origin_name]
        
        if db_hit:
            # DB record exists — verify the referenced local file still exists
            loc = getattr(db_hit, 'location', '')
            if loc:
                abs_path = to_abs_path(loc)
                if os.path.exists(abs_path):
                    # Local file present: reuse the DB record as-is
                    new_att["local_save_path"] = loc
                    new_att["id"] = getattr(db_hit, 'origin_name', origin_name) or origin_name
                    processed_attachments.append(new_att)
                    logger.debug(f"Using existing DB record: {origin_name}")
                    continue
                else:
                    # DB record exists but the file is gone — fall through to
                    # the folder search / download below
                    logger.warning(f"DB record exists but local file not found: {loc}, will try to find or download")
        
        # ========== Try to find the image in the local folder ==========
        local_image_path = None
        
        # Strategy 1: exact match on origin_name
        if origin_name and origin_name in image_files_dict:
            local_image_path = image_files_dict[origin_name]
        
        # Strategy 2: match the file name taken from image_url
        # (also tries the URL-decoded variant of the name)
        if not local_image_path and image_url:
            url_filename = os.path.basename(image_url)
            try:
                url_filename_decoded = unquote(url_filename)
                if url_filename_decoded != url_filename and url_filename_decoded in image_files_dict:
                    local_image_path = image_files_dict[url_filename_decoded]
                elif url_filename in image_files_dict:
                    local_image_path = image_files_dict[url_filename]
            except Exception:
                if url_filename in image_files_dict:
                    local_image_path = image_files_dict[url_filename]
        
        if local_image_path and local_image_path.exists():
            # Local file exists — use it and record it in the DB
            rel_path = to_rel_path(str(local_image_path))
            new_att["id"] = local_image_path.name
            new_att["local_save_path"] = rel_path
            
            # Insert a DB record only if none exists yet
            if not db_hit and file_obj:
                try:
                    file_size = os.path.getsize(str(local_image_path))
                    db_add_qa_attach(new_att["id"], file_size, rel_path, file_obj.id, image_url)
                    logger.info(f"Added attachment to DB (from local): {origin_name} -> {rel_path}")
                except Exception as e:
                    logger.warning(f"Failed to save attachment to DB: {e}")
            
            processed_attachments.append(new_att)
            logger.debug(f"Using local file: {origin_name}")
        else:
            # Not found locally — queue it for download
            download_tasks.append({
                'att': new_att,
                'image_url': image_url,
                'origin_name': origin_name,
                'save_dir': str(folder_path),
                'file_obj': file_obj
            })
    
    # Pass 2: download all missing images in parallel
    if download_tasks:
        logger.info(f"Starting parallel download of {len(download_tasks)} images with {max_workers} workers...")
        
        def download_and_save(task_info):
            """Download one image, persist its DB record, and report the outcome."""
            att = task_info['att']
            image_url = task_info['image_url']
            origin_name = task_info['origin_name']
            save_dir = task_info['save_dir']
            file_obj = task_info['file_obj']
            
            try:
                logger.info(f"Downloading image: {origin_name} from {image_url}")
                download_success, download_path = download_attachment(
                    image_url,
                    save_dir,
                    max_retries=max_download_retries
                )
                
                if download_success and download_path:
                    # Download succeeded — record it in the DB
                    rel_path = to_rel_path(download_path)
                    att["id"] = os.path.basename(download_path)
                    att["local_save_path"] = rel_path
                    
                    if file_obj:
                        try:
                            file_size = os.path.getsize(download_path)
                            db_add_qa_attach(att["id"], file_size, rel_path, file_obj.id, image_url)
                            logger.info(f"Downloaded and saved attachment to DB: {origin_name} -> {rel_path}")
                        except Exception as e:
                            logger.warning(f"Failed to save downloaded attachment to DB: {e}")
                    
                    return {'success': True, 'att': att, 'origin_name': origin_name}
                else:
                    logger.error(f"Failed to download image: {origin_name} from {image_url}")
                    return {'success': False, 'att': att, 'origin_name': origin_name}
            except Exception as e:
                logger.error(f"Error downloading image {origin_name}: {e}")
                return {'success': False, 'att': att, 'origin_name': origin_name}
        
        # Fan the downloads out across a thread pool
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Submit every download task
            future_to_task = {
                executor.submit(download_and_save, task): task 
                for task in download_tasks
            }
            
            # Collect results as they complete; failed downloads still keep
            # their original attachment entry (without local_save_path)
            for future in as_completed(future_to_task):
                try:
                    result = future.result()
                    processed_attachments.append(result['att'])
                    if result['success']:
                        logger.debug(f"Successfully processed: {result['origin_name']}")
                except Exception as e:
                    task = future_to_task[future]
                    logger.error(f"Task failed for {task['origin_name']}: {e}")
                    processed_attachments.append(task['att'])
        
        logger.info(f"Completed parallel download of {len(download_tasks)} images")
    
    return processed_attachments


def process_folder(folder_path: Path, json_path: Path, user_id: int = 1) -> Tuple[bool, str]:
    """
    Process a single folder containing a ``{hash}_results.json`` file.

    Loads the results JSON, resolves the matching DB file record, processes
    image attachments (downloading missing ones in parallel), and re-indexes
    the resulting documents into Elasticsearch.

    Args:
        folder_path: Folder that holds the results JSON and its media files.
        json_path: Path to the ``*_results.json`` file inside *folder_path*.
        user_id: Importing user's ID (currently unused here; kept for
            interface compatibility with callers).

    Returns:
        ``(success, message)`` tuple.
    """
    try:
        # 1. Load the results JSON
        results_data = load_results_json(json_path)
        if not results_data:
            return False, "Failed to load results JSON"

        if not results_data.get("success"):
            return False, "Results JSON indicates unsuccessful processing"

        # 2. Derive the hash-based file name from the folder name
        hash_name = folder_path.name + '.pdf'

        # 3. Locate the PDF file
        pdf_path = find_pdf_file(folder_path, hash_name)
        if not pdf_path:
            return False, f"PDF file not found in folder {folder_path}"

        # 4. Extract file metadata from the JSON
        filename = results_data.get("filename", hash_name)
        gb_number = results_data.get("gb_number", "")
        scope = results_data.get("scope", "")
        category = results_data.get("category", "")

        # 5. Resolve the existing DB record for this file.
        # BUG FIX: file_obj was previously only assigned inside the branch
        # below, raising UnboundLocalError when no (non-deleted) record
        # existed instead of returning a clean failure.
        file_obj = None
        existing_file = db_get_qa_file_by_name(hash_name)
        if existing_file and not existing_file.is_delete:
            file_obj = existing_file
            logger.info(f"File already exists in database: {hash_name}")

        if not file_obj:
            return False, "Failed to save file to database"

        # 6. Process image attachments with parallel downloading
        processed_attachments = process_attachments_from_results(
            results_data,
            file_obj,
            folder_path,
            max_download_retries=3,
            max_workers=10  # number of parallel download threads
        )

        # 7. Convert the results.json content into the pdf_metadata shape
        #    expected by create_documents_from_pdf
        pdf_metadata = {
            "filename": filename,
            "gb_number": gb_number,
            "scope": scope,
            "category": category,
            "main_text": results_data.get("main_text", []),
            "attachments": processed_attachments,
        }

        # 8. Build document objects
        documents = create_documents_from_pdf(pdf_metadata, file_obj, processed_attachments)

        # 9. Select the ES vector store for this file type
        vector_store = kb_vector_store

        # 10. Delete any existing ES docs for this file before re-indexing
        try:
            from service.es_service import ELASTICSEARCH, ELASTICSEARCH_EMERGENCY
            es_client = ELASTICSEARCH
            index_name = ES_KB_INDEX_NAME

            query = {
                "query": {
                    "term": {
                        "metadata.file_id.keyword": str(file_obj.id)
                    }
                }
            }
            es_client.delete_doc_by_query(query=query, idxnm=index_name)
            logger.info(f"Deleted existing ES docs for file_id: {file_obj.id}")
        except Exception as e:
            logger.warning(f"Failed to delete existing ES docs: {e}")

        # 11. Bulk-index into ES (optimized batch insertion)
        if documents:
            try:
                insert_ids = batch_add_documents_to_es(vector_store, documents, ES_KB_INDEX_NAME)
                # Log the actual file name instead of the old "(unknown)" placeholder
                logger.info(f"Added {len(insert_ids)} documents to ES for file: {hash_name}")
                return True, f"Successfully processed: {len(documents)} documents indexed"
            except Exception as e:
                logger.error(f"Failed to batch add documents to ES: {e}")
                # Batch insert failed — fall back to single-document insertion
                logger.info("Falling back to single document insertion...")
                try:
                    insert_ids = add_documents_to_es(vector_store, documents)
                    logger.info(f"Added {len(insert_ids)} documents to ES (fallback method) for file: {hash_name}")
                    return True, f"Successfully processed: {len(documents)} documents indexed (fallback)"
                except Exception as e2:
                    logger.error(f"Fallback insertion also failed: {e2}")
                    return False, f"Failed to add documents to ES: {e2}"
        else:
            logger.warning(f"No documents created for file: {hash_name}")
            return True, "File saved but no documents created"

    except Exception as e:
        logger.error(f"Error processing folder {folder_path}: {e}")
        import traceback
        logger.error(traceback.format_exc())
        return False, f"Error: {str(e)}"


def main():
    """CLI entry point: scan a root directory and import every results folder found."""
    parser = argparse.ArgumentParser(
        description="导入resources/files目录下所有包含{hash}_results.json文件的文件夹数据"
    )
    parser.add_argument('--root', type=str, default='resources/files', 
                       help='根目录路径（默认: resources/files）')
    parser.add_argument('--user-id', type=int, default=1,
                       help='创建文件的用户ID（默认: 1）')
    args = parser.parse_args()

    root_dir = Path(args.root).expanduser().resolve()
    if not root_dir.exists():
        logger.error(f"Root directory does not exist: {root_dir}")
        sys.exit(1)

    # Discover every folder that carries a *_results.json file
    targets = find_results_json_folders(root_dir)
    total = len(targets)

    if total == 0:
        logger.warning(f"No folders with *_results.json found in {root_dir}")
        sys.exit(0)

    logger.info(f"Found {total} folders with results.json files")

    ok_count = 0
    fail_count = 0

    for position, (folder_path, json_path) in enumerate(targets, start=1):
        logger.info(f"[{position}/{total}] Processing: {folder_path.name}")
        succeeded, message = process_folder(folder_path, json_path, user_id=args.user_id)

        if succeeded:
            ok_count += 1
            logger.info(f"  ✓ {message}")
        else:
            fail_count += 1
            logger.error(f"  ✗ {message}")

    # Final summary banner
    logger.info("\n" + "=" * 50)
    logger.info("Summary:")
    logger.info(f"  Total:   {total}")
    logger.info(f"  Success: {ok_count}")
    logger.info(f"  Failed:  {fail_count}")
    logger.info("=" * 50)


if __name__ == '__main__':
    main()

