import logging
from concurrent.futures import ThreadPoolExecutor, as_completed

from app.config import settings
from app.llm.llm_utils import call_knowledge_base, analyze_with_llm
from app.utils.db_utils import get_db_connection
from app.utils.email_utils import send_email
from app.utils.excel_utils import load_excel_to_db, export_processed_file_to_excel


def call_knowledge(conn, file_id: int, knowledge: dict):
    """
    Batch-process pending rows for a file: query the knowledge base concurrently
    and advance each row toward LLM analysis.

    For each batch of rows in 'PENDING' status:
      1. Mark them 'PROCESSING_KB' so a concurrent worker cannot claim them.
      2. Query the knowledge base for each row via a thread pool.
      3. Persist results: successes become 'PENDING_LLM' with the KB answer,
         failures become 'FAILED' with an error summary.

    Args:
        conn: Open database connection supporting ``cursor(dictionary=True)``
            (presumably mysql-connector — verify against get_db_connection).
        file_id: Primary key of the source file whose rows are processed.
        knowledge: Knowledge-base configuration forwarded to call_knowledge_base.
    """
    batch_size = settings.processing_batch_size
    kb_max_workers = settings.kb_thread_pool_max_worker
    logging.info(f"--- [Stage 3.1] Batch processing data and querying knowledge base (concurrent, max_workers={kb_max_workers}) ---")
    while True:
        cursor = conn.cursor(dictionary=True)
        # Fetch the next batch of rows that are ready for knowledge base processing.
        cursor.execute(
            "SELECT id, description FROM file_contents WHERE status = 'PENDING' AND file_id = %s LIMIT %s",
            (file_id, batch_size),
        )
        tasks = cursor.fetchall()
        if not tasks:
            cursor.close()
            logging.info("No more PENDING tasks to query from the knowledge base for this file.")
            break

        # Claim the batch up front so other workers skip these rows.
        task_ids = [task["id"] for task in tasks]
        format_strings = ",".join(["%s"] * len(task_ids))
        cursor.execute(
            f"UPDATE file_contents SET status = 'PROCESSING_KB' WHERE id IN ({format_strings})",
            tuple(task_ids),
        )
        conn.commit()
        cursor.close()

        logging.info(f"Processing {len(tasks)} tasks for knowledge base query with concurrent pool (max_workers={kb_max_workers})...")

        # Query the knowledge base concurrently; collect outcomes as they finish.
        results_to_update = []
        with ThreadPoolExecutor(max_workers=kb_max_workers) as executor:
            future_to_task = {
                executor.submit(call_knowledge_base, task["description"], knowledge): task
                for task in tasks
            }
            for future in as_completed(future_to_task):
                task_id = future_to_task[future]["id"]
                try:
                    kb_answer = future.result()
                    results_to_update.append({
                        "task_id": task_id,
                        "kb_answer": kb_answer,
                        "status": "PENDING_LLM",
                        "error": None,
                    })
                    logging.info(f"    > Knowledge base query for task (DB ID: {task_id}) complete.")
                except Exception as e:
                    logging.error(f"Error calling knowledge base for task (DB ID: {task_id}): {e}", exc_info=True)
                    # A failed KB call permanently fails this row for the current pass.
                    results_to_update.append({
                        "task_id": task_id,
                        "kb_answer": None,
                        "status": "FAILED",
                        "error": "Failed during knowledge base query.",
                    })

        # Persist the batch results from the main thread only.
        if results_to_update:
            logging.info(f"Updating {len(results_to_update)} tasks in the database...")
            update_cursor = conn.cursor()
            for res in results_to_update:
                task_id = res['task_id']
                status = res['status']
                if status == 'PENDING_LLM':
                    update_cursor.execute(
                        """
                        UPDATE file_contents 
                        SET kb_answer=%s, status=%s
                        WHERE id=%s
                        """,
                        (res['kb_answer'], status, task_id),
                    )
                else:  # FAILED: record only the error summary.
                    update_cursor.execute(
                        "UPDATE file_contents SET status=%s, summary=%s WHERE id=%s",
                        (status, res['error'], task_id)
                    )
            conn.commit()
            update_cursor.close()
            logging.info("Database update batch complete.")

    logging.info(f"All knowledge base queries for file ID {file_id} are complete.")


def _analyze_task_with_llm(task: dict, language: str) -> dict:
    """
    Helper function to call the LLM for a single task and return the result.
    This function is designed to be run in a separate thread from the global pool.
    """
    task_id = task["id"]
    description = task["description"]
    kb_answer = task["kb_answer"]

    try:
        support, confidence, source, summary = analyze_with_llm(
            description, kb_answer, language
        )
        status = "COMPLETED" if support != "Error" else "FAILED"
        return {
            "task_id": task_id,
            "support": support,
            "confidence": confidence,
            "source": source,
            "summary": summary,
            "status": status
        }
    except Exception as e:
        logging.error(f"Error during LLM analysis for task (DB ID: {task_id}): {e}", exc_info=True)
        return {
            "task_id": task_id,
            "summary": "Failed during LLM analysis.",
            "status": "FAILED"
        }


def llm_answer(conn, file_id: int, llm_executor: ThreadPoolExecutor):
    """
    Generate final LLM answers for all rows of a file awaiting analysis.

    For each batch of rows in 'PENDING_LLM' status:
      1. Mark them 'PROCESSING_LLM' so a concurrent worker cannot claim them.
      2. Submit each row to the shared ``llm_executor`` thread pool.
      3. Persist results: 'COMPLETED' rows receive all analysis columns,
         'FAILED' rows receive only the error summary.

    Args:
        conn: Open database connection supporting ``cursor(dictionary=True)``.
        file_id: Primary key of the source file whose rows are processed.
        llm_executor: Shared ThreadPoolExecutor used for the LLM calls.
    """
    batch_size = settings.processing_batch_size
    logging.info("--- [Stage 3.2] Generating final answers with LLM (using provided pool) ---")

    # Resolve the answer language configured for this file (default: English).
    cursor = conn.cursor(dictionary=True)
    cursor.execute("SELECT language FROM source_files WHERE id = %s", (file_id,))
    row = cursor.fetchone()
    cursor.close()
    language = row["language"] if row and row["language"] else "English"

    while True:
        cursor = conn.cursor(dictionary=True)
        # Fetch the next batch of rows that are ready for LLM processing.
        cursor.execute(
            "SELECT id, description, kb_answer FROM file_contents WHERE status = 'PENDING_LLM' AND file_id = %s LIMIT %s",
            (file_id, batch_size),
        )
        tasks = cursor.fetchall()
        if not tasks:
            cursor.close()
            logging.info("No more tasks awaiting LLM analysis for this file.")
            break

        # Claim the batch up front so other workers skip these rows.
        task_ids = [task["id"] for task in tasks]
        format_strings = ",".join(["%s"] * len(task_ids))
        cursor.execute(
            f"UPDATE file_contents SET status = 'PROCESSING_LLM' WHERE id IN ({format_strings})",
            tuple(task_ids),
        )
        conn.commit()
        cursor.close()

        logging.info(f"Submitting {len(tasks)} tasks to the LLM thread pool...")

        results_to_update = []
        # Use the shared executor passed by the caller (not a per-batch pool).
        future_to_task = {llm_executor.submit(_analyze_task_with_llm, task, language): task for task in tasks}

        for future in as_completed(future_to_task):
            task = future_to_task[future]
            try:
                results_to_update.append(future.result())
            except Exception as exc:
                # _analyze_task_with_llm catches its own errors; this is a safety net.
                task_id = task['id']
                logging.error(f'Task (DB ID: {task_id}) generated an unexpected exception in thread: {exc}',
                              exc_info=True)
                results_to_update.append({
                    "task_id": task_id,
                    "summary": f"Failed with exception: {exc}",
                    "status": "FAILED",
                })

        # Persist the batch results from the main thread only.
        if results_to_update:
            logging.info(f"Updating {len(results_to_update)} tasks in the database...")
            update_cursor = conn.cursor()
            for res in results_to_update:
                task_id = res['task_id']
                status = res['status']
                if status == 'COMPLETED':
                    update_cursor.execute(
                        """
                        UPDATE file_contents 
                        SET support_status=%s, confidence=%s, source_documents=%s, summary=%s, status=%s
                        WHERE id=%s
                        """,
                        (res['support'], res['confidence'], res['source'], res['summary'], status, task_id),
                    )
                    logging.info(f"    > Task (DB ID: {task_id}) completed, status: {status}")
                else:  # FAILED: record only the error summary.
                    update_cursor.execute(
                        "UPDATE file_contents SET status=%s, summary=%s WHERE id=%s",
                        (status, res['summary'], task_id)
                    )
                    logging.error(f"    > Task (DB ID: {task_id}) failed, status: {status}")
            conn.commit()
            update_cursor.close()
            logging.info("Database update batch complete.")

    logging.info(f"All LLM analysis for file ID {file_id} is complete.")


def send_plan_email(conn, file_id, sender_email):
    """
    Notify the uploader that processing has started, or that the file is empty.

    Reads the row count and original filename for the file, estimates the
    completion time at roughly 10 rows per minute (minimum 1 minute), and
    sends the matching notification email.

    Args:
        conn: Open database connection supporting ``cursor(dictionary=True)``.
        file_id: Primary key of the source file.
        sender_email: Recipient address (the user who uploaded the file).
    """
    logging.info(f"--- [Stage 2] Send plan email to user: {sender_email} ---")
    cursor = conn.cursor(dictionary=True)
    cursor.execute(
        "SELECT total_rows, filename FROM source_files WHERE id = %s", (file_id,)
    )
    row = cursor.fetchone()
    cursor.close()
    total_rows = row["total_rows"] if row else 0
    filename = row["filename"] if row else ""
    if total_rows > 0:
        # Rough estimate: ~10 questions per minute, never less than one minute.
        estimated_minutes = max(1, round(total_rows / 10))
        # Bug fix: the fetched filename was never interpolated — the emails
        # previously showed a literal placeholder instead of the file name.
        email_subject_start = f"Your file '{filename}' has started processing"
        email_body_start = f"""Hello,\n\nYour uploaded file '{filename}' has been successfully received and processing has started.\n\n- Total number of questions to process: {total_rows}\n- Estimated completion time: about {estimated_minutes} minutes\n\nWe will notify you again by email once processing is complete.\n\nBest regards,\nAutomated Response System\n            """
        send_email(
            to_email=sender_email,
            subject=email_subject_start,
            body=email_body_start,
        )
    else:
        logging.warning(f"No data rows to process found in file '{filename}'.")
        email_subject_empty = f"File processing notice: {filename}"
        email_body_empty = f"""Hello,\n\nWe have checked your uploaded file '{filename}', but did not find any data rows to analyze.\n\nPlease check your file content and format.\n\nBest regards,\nAutomated Response System\n            """
        send_email(
            to_email=sender_email,
            subject=email_subject_empty,
            body=email_body_empty,
        )


def send_result_email(conn, file_id, sender_email):
    """
    Send the final "processing complete" email, including a download link
    when a result file URL is available.

    Args:
        conn: Open database connection supporting ``cursor(dictionary=True)``.
        file_id: Primary key of the source file.
        sender_email: Recipient address (the user who uploaded the file).
    """
    logging.info(f"--- [Stage 5] send result to user: {sender_email} ---")
    cursor = conn.cursor(dictionary=True)
    cursor.execute(
        "SELECT result_filename, filename, result_file_url FROM source_files WHERE id = %s",
        (file_id,),
    )
    row = cursor.fetchone()
    cursor.close()
    result_filename = row["result_filename"] if row else None
    filename = row["filename"] if row else ""
    result_file_url = row["result_file_url"] if row else None
    # Bug fix: the fetched filename was never interpolated — the emails
    # previously showed a literal placeholder instead of the file name.
    email_subject_end = f"Your file '{filename}' has been processed"
    if result_filename and result_file_url:
        email_body_end = f"""Hello,\n\nYour uploaded file '{filename}' has been successfully processed.\n\nThe results have been generated as '{result_filename}'.\nYou can download the result file here: {result_file_url}\n\nBest regards,\nAutomated Response System\n        """
    else:
        # No downloadable artifact recorded; point the user at the system UI.
        email_body_end = f"""Hello,\n\nYour uploaded file '{filename}' has been successfully processed.\n\nThe results have been generated and you can view or download them in the system.\n\nBest regards,\nAutomated Response System\n        """
    send_email(to_email=sender_email, subject=email_subject_end, body=email_body_end)


def reset_content_status(db_connection, file_id):
    """
    Re-queue completed rows whose support_status is 'No' for another pass.

    Clears every analysis column and moves matching rows back to 'PENDING'
    so the next knowledge-base source can retry them.
    """
    logging.info(f"reset content status for file id: {file_id}")
    cleared = (None, None, None, None, 'PENDING')
    selector = (file_id, "COMPLETED", "No")
    cur = db_connection.cursor()
    cur.execute(
        "UPDATE file_contents SET support_status=%s, confidence=%s, source_documents=%s, summary=%s, status=%s WHERE file_id=%s and status=%s and support_status=%s",
        cleared + selector,
    )
    db_connection.commit()
    cur.close()


def process_file_task(
        file_path: str, sender_email: str, tmp_subdir: str, language: str, llm_executor: ThreadPoolExecutor
):
    """
    End-to-end background pipeline for one uploaded file.

    Stages: load the Excel file into the database, email a "processing
    started" notice, run one knowledge-base + LLM pass per configured
    knowledge source (re-queuing unsupported rows between passes), export
    the results to Excel, and email the final result. On any unexpected
    error an apology email is sent instead; the database connection is
    always closed.

    Args:
        file_path: Path of the uploaded file to process.
        sender_email: Address of the uploader for all notifications.
        tmp_subdir: Temporary working directory for the Excel loader.
        language: Target language for the generated answers.
        llm_executor: Shared ThreadPoolExecutor for the LLM analysis stage.
    """
    logging.info(
        f"Background task started: processing file '{file_path}', request from '{sender_email}', language='{language}'."
    )
    db_connection = None
    try:
        db_connection = get_db_connection()

        # Stage 1: load the spreadsheet and create one task row per question.
        file_id = load_excel_to_db(
            db_connection, file_path, tmp_path=tmp_subdir, language=language
        )

        # Stage 2: tell the uploader that processing has begun.
        send_plan_email(db_connection, file_id, sender_email)

        # Stage 3: one knowledge-base + LLM pass per configured source.
        for index, knowledge in enumerate(settings.knowledge):
            if index:
                # After the first pass, retry rows that came back unsupported.
                reset_content_status(db_connection, file_id)
            call_knowledge(db_connection, file_id, knowledge)       # Stage 3.1
            llm_answer(db_connection, file_id, llm_executor)        # Stage 3.2

        # Stage 4: write the processed rows back out as an Excel file.
        export_processed_file_to_excel(db_connection, file_id)

        # Stage 5: deliver the result to the uploader.
        send_result_email(db_connection, file_id, sender_email)
    except Exception as exc:
        logging.error(
            f"Unexpected exception occurred while processing file '{file_path}': {exc}",
            exc_info=True,
        )
        failure_subject = f"File processing exception: {file_path}"
        failure_body = f"""Hello,\n\nAn unexpected system error occurred while processing your uploaded file '{file_path}'. Our technical team has been notified and will address the issue as soon as possible.\n\nWe apologize for the inconvenience.\n\nBest regards,\nAutomated Response System\n        """
        send_email(
            to_email=sender_email, subject=failure_subject, body=failure_body
        )
    finally:
        # Always release the DB connection, even after a failure email.
        if db_connection and db_connection.is_connected():
            db_connection.close()
            logging.info("Database connection closed.")
