from flask import Flask, request, jsonify
import os
import json
import requests
import logging
from typing import Dict, Any, Optional, List, Tuple
import subprocess
from dotenv import load_dotenv
from config import Config
import traceback
import datetime
from werkzeug.utils import secure_filename
import shutil
import re

# Configure application-wide logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Flask(__name__)

class PDFImageProcessor:
    """Pipeline helper for PDF figure handling.

    Extracts images from PDFs with the external ``magic-pdf`` tool, uploads the
    extracted images over HTTP (MinIO upload gateway), and rewrites RAGFlow
    document chunks so figure captions are followed by ``<img>`` tags pointing
    at the uploaded images.
    """

    def __init__(self, api_key: str):
        # RAGFlow API key, sent as a Bearer token on every RAGFlow request.
        self.api_key = api_key
        self.headers = {"Authorization": f"Bearer {api_key}"}

    def extract_images_from_pdf(self, pdf_path: str, output_dir: str) -> Dict:
        """Run magic-pdf on a single PDF.

        Args:
            pdf_path: Path of the PDF to parse.
            output_dir: Root directory for extraction output. magic-pdf is
                pointed at ``<output_dir>/<pdf_name>/`` and creates its own
                nested ``<pdf_name>/auto/`` layout below that.

        Returns:
            ``{"success": bool, "results": [{"filename", "status", ...}]}``
        """
        os.makedirs(output_dir, exist_ok=True)

        filename = os.path.basename(pdf_path)
        pdf_name = os.path.splitext(filename)[0]
        out_path = os.path.join(output_dir, pdf_name)
        os.makedirs(out_path, exist_ok=True)

        cmd = ['magic-pdf', '-p', pdf_path, '-o', out_path]
        logger.info(f"正在解析 PDF: {pdf_path}")
        try:
            # capture_output is required: without it CalledProcessError.stderr
            # is always None, so the error branch below logged nothing useful.
            subprocess.run(cmd, check=True, capture_output=True, text=True)
            logger.info(f"解析成功: {filename}")
            return {"success": True, "results": [{"filename": filename, "status": "success"}]}
        except subprocess.CalledProcessError as e:
            logger.error(f"解析失败: {filename}")
            logger.error(e.stderr)
            return {"success": False, "results": [{"filename": filename, "status": "failed", "error": str(e.stderr)}]}

    def upload_images_via_http(self, output_dir: str, upload_url: str, bucket_name: str, image_data: Optional[list] = None, pdf_name: str = '') -> Dict:
        """Upload every image referenced by magic-pdf's content list.

        Args:
            output_dir: Root extraction directory for this PDF.
            upload_url: HTTP endpoint accepting multipart ``file`` + ``bucket_name``.
            bucket_name: Target MinIO bucket name.
            image_data: magic-pdf content list; only items with
                ``type == "image"`` are considered.
            pdf_name: PDF base name used to rebuild the nested output path.

        Returns:
            ``{"success": True, "results": [...]}`` — per-image outcomes.
        """
        results: List[Dict] = []
        valid_image_paths = set()

        if image_data:
            for item in image_data:
                if item.get("type") != "image":
                    continue
                img_path = item.get("img_path")
                if not img_path:
                    continue
                try:
                    # Keep only the part after 'auto/' and rebuild the full
                    # on-disk path (magic-pdf nests <pdf>/<pdf>/auto/...).
                    img_rel_path = img_path.split('auto/', 1)[-1]
                    full_path = os.path.join(output_dir, pdf_name, pdf_name, 'auto', img_rel_path)
                    valid_image_paths.add(os.path.normpath(full_path))
                except Exception as e:
                    logger.warning(f"解析图片路径失败: {img_path}, 错误: {str(e)}")

        for image_path in valid_image_paths:
            if not os.path.exists(image_path):
                logger.warning(f"跳过不存在的文件: {image_path}")
                continue
            try:
                logger.debug(f"准备上传图片: {image_path}")
                with open(image_path, 'rb') as f:
                    files = {'file': (os.path.basename(image_path), f, 'image/jpeg')}
                    data = {"bucket_name": bucket_name}
                    # timeout guards against a hung upload endpoint.
                    response = requests.post(upload_url, files=files, data=data, timeout=60)
                    response.raise_for_status()
                    json_resp = response.json()
                    logger.info(f"图片上传成功: {image_path}")
                    results.append({"filename": os.path.basename(image_path), "status": "success", "response": json_resp})
            except Exception as e:
                logger.error(f"上传失败: {image_path}，原因: {str(e)}")
                results.append({"filename": os.path.basename(image_path), "status": "failed", "error": str(e)})

        # Bug fix: the original returned only inside `if image_data:`, so an
        # empty/missing image list made this method return None and crash the
        # caller's `upload_result.get('success')` check.
        return {"success": True, "results": results}

    def process_json_content(self, pdf_name: str, image_data: list, chunks: list) -> list:
        """Insert ``<img>`` tags into chunk content after matching captions.

        Matches full captions, "图X <name>" variants (optionally hyphenated
        figure numbers) and "(x) <name>" sub-figure variants, replacing the
        longest key first so a full description is never truncated.

        Args:
            pdf_name: PDF base name (kept for interface compatibility; unused).
            image_data: magic-pdf content list (items with ``type == 'image'``).
            chunks: RAGFlow chunk dicts whose ``content`` is rewritten.

        Returns:
            The list of processed chunk dicts.
        """
        processed_chunks = []

        for chunk in chunks:
            if not isinstance(chunk, dict):
                logger.warning(f"遇到非字典类型的切片，跳过处理: {chunk}")
                processed_chunks.append(chunk)
                continue

            original_content = chunk.get('content', '')
            new_content = original_content

            # Map caption text -> <img> tag (exact full-caption match first).
            image_path_map = {}

            for item in image_data:
                if item.get('type') != 'image':
                    continue

                img_caption = item.get('img_caption', [])
                img_path = item.get('img_path', '')

                file_name = img_path.split('images/', 1)[-1]
                img_url = f"{Config.API_CONFIG['image_address_url']}?bucket_name={Config.MINIO_CONFIG['bucket_name']}&file_name={file_name}"

                for caption in img_caption:
                    image_tag = f'<img src="{img_url}" alt="{caption}" style="max-width:100%">'

                    caption_clean = caption.strip()
                    image_path_map[caption_clean] = image_tag  # exact original caption

                    # Match "图X <name>" (hyphenated numbers like 图1-2 allowed).
                    match_figure = re.match(r'图\s*(\d+(?:\s*-\s*\d+)?)[\s\-–]*(.+)', caption_clean)
                    if match_figure:
                        figure_number = match_figure.group(1).replace(" ", "")
                        figure_name = match_figure.group(2).strip()
                        full_figure = f"图 {figure_number} {figure_name}"
                        compact_figure = f"图{figure_number} {figure_name}"
                        image_path_map[full_figure] = image_tag
                        image_path_map[compact_figure] = image_tag

                    # Match "(x) <name>" sub-figure captions.
                    match_paren = re.match(r'\(?([a-zA-Z])\)?[\s\-–]*(.+)', caption_clean)
                    if match_paren:
                        letter = match_paren.group(1)
                        title = match_paren.group(2).strip()
                        variants = [
                            f"({letter}) {title}",
                            f"（{letter}） {title}",
                            f"{letter}) {title}",
                        ]
                        for var in variants:
                            image_path_map[var] = image_tag

            # Guard against inserting the same tag twice for one key.
            inserted = set()

            # Exact substring replacement, longest keys first.
            for key in sorted(image_path_map.keys(), key=len, reverse=True):
                if key in new_content and key not in inserted:
                    new_content = new_content.replace(key, f'{key} {image_path_map[key]}')
                    inserted.add(key)

            chunk['content'] = new_content
            processed_chunks.append(chunk)

        return processed_chunks

    def get_chunks_from_ragflow(self, dataset_id: str, document_id: str) -> list:
        """Fetch the chunk list of one document from RAGFlow.

        Returns:
            List of chunk dicts; ``[]`` when the payload shape is unexpected.

        Raises:
            Re-raises network/HTTP errors so the caller can fail the file.
        """
        try:
            response = requests.get(
                f"http://{Config.RAGFLOW_CONFIG['address']}/api/v1/datasets/{dataset_id}/documents/{document_id}/chunks",
                headers=self.headers,  # same Bearer token built in __init__
                timeout=30
            )
            response.raise_for_status()
            data = response.json()

            # Chunks live under the nested data.chunks field.
            chunks = data.get("data", {}).get("chunks", [])

            if not isinstance(chunks, list):
                logger.error(f"RAGFlow 返回的数据格式不正确: {type(chunks)}，内容: {chunks}")
                return []

            return chunks
        except Exception as e:
            logger.error(f"获取切片失败: {str(e)}")
            raise

    def update_chunk_content(self, dataset_id: str, document_id: str, chunk_id: str, content: str) -> bool:
        """Update one chunk's content in RAGFlow.

        Args:
            dataset_id: Dataset ID.
            document_id: Document ID.
            chunk_id: Chunk ID.
            content: New chunk content.

        Returns:
            True on success, False on any failure (error is logged).
        """
        try:
            response = requests.put(
                f"http://{Config.RAGFLOW_CONFIG['address']}/api/v1/datasets/{dataset_id}/documents/{document_id}/chunks/{chunk_id}",
                headers=self.headers,
                json={'content': content},
                timeout=30
            )
            response.raise_for_status()
            return True
        except Exception as e:
            logger.error(f"更新切片失败: {str(e)}")
            return False

def _update_chunks(processor, dataset_id: str, document_id: str, processed_chunks: list) -> Tuple[int, list]:
    """Push processed chunk contents back to RAGFlow.

    Returns:
        (success_count, failed_chunk_ids)
    """
    success_count = 0
    failed_chunks = []
    for chunk in processed_chunks:
        if not isinstance(chunk, dict):
            logger.warning(f"跳过无效切片: {chunk}")
            continue
        chunk_id = chunk.get('id')
        content = chunk.get('content')
        if not chunk_id or content is None:
            logger.warning(f"切片数据缺少'id'或'content': {chunk}")
            continue

        if processor.update_chunk_content(dataset_id, document_id, chunk_id, content):
            success_count += 1
        else:
            failed_chunks.append(chunk_id)
    return success_count, failed_chunks


def _process_single_pdf(processor, file, dataset_id: str, document_id: str) -> Dict:
    """Run the full pipeline for one uploaded PDF; returns its result dict.

    Pipeline: save upload -> magic-pdf extraction -> upload images to MinIO
    -> fetch RAGFlow chunks -> inject <img> tags -> push updated chunks.
    """
    # basename() strips any path components smuggled into the client-supplied
    # filename (e.g. "../../x.pdf"). secure_filename() is deliberately NOT
    # used: it removes non-ASCII characters and would empty Chinese filenames.
    filename = os.path.basename(file.filename or '')
    if not filename.lower().endswith('.pdf'):
        return {
            'filename': file.filename,
            'status': 'failed',
            'error': 'Only PDF files are allowed'
        }

    pdf_name = os.path.splitext(filename)[0]

    os.makedirs(Config.INPUT_DIR, exist_ok=True)
    file_input_dir = os.path.join(Config.INPUT_DIR, pdf_name)
    file_output_dir = os.path.join(Config.OUTPUT_DIR, pdf_name)

    # Start each file from clean, private working directories.
    for work_dir in (file_input_dir, file_output_dir):
        if os.path.exists(work_dir):
            shutil.rmtree(work_dir)
        os.makedirs(work_dir, exist_ok=True)

    save_path = os.path.join(file_input_dir, filename)
    file.save(save_path)
    logger.info(f"文件已保存到: {save_path}")

    # Extract images (only for the current PDF).
    result = processor.extract_images_from_pdf(save_path, file_output_dir)
    if not result.get('success'):
        return {
            'filename': filename,
            'status': 'failed',
            'error': 'PDF处理失败',
            'details': result
        }

    # magic-pdf writes <out>/<pdf>/<pdf>/auto/<pdf>_content_list.json
    json_path = os.path.join(file_output_dir, pdf_name, pdf_name, 'auto', f'{pdf_name}_content_list.json')
    if not os.path.exists(json_path):
        return {
            'filename': filename,
            'status': 'failed',
            'error': 'Image data file not found',
            'path': json_path
        }

    with open(json_path, 'r', encoding='utf-8') as f:
        image_data = json.load(f)

    # Upload extracted images to MinIO (only type=image entries are used).
    upload_result = processor.upload_images_via_http(
        file_output_dir,
        Config.API_CONFIG['upload_url'],
        Config.MINIO_CONFIG['bucket_name'],
        image_data=image_data,
        pdf_name=pdf_name
    )
    if not upload_result.get('success'):
        return {
            'filename': filename,
            'status': 'failed',
            'error': '图片上传失败',
            'details': upload_result
        }

    # Fetch this document's chunks from RAGFlow.
    chunks = processor.get_chunks_from_ragflow(dataset_id, document_id)
    if not isinstance(chunks, list):
        logger.error(f"从 RAGFlow 获取的 chunks 数据不是列表: {type(chunks)}，内容为: {chunks}")
        return {
            'filename': filename,
            'status': 'failed',
            'error': 'Invalid chunk format from RAGFlow'
        }

    logger.info(f"获取到 {len(chunks)} 个切片")

    # Inject <img> tags next to matching figure captions, then push back.
    processed_chunks = processor.process_json_content(pdf_name, image_data, chunks)
    success_count, failed_chunks = _update_chunks(processor, dataset_id, document_id, processed_chunks)

    return {
        'filename': filename,
        'status': 'success',
        'image_processing': result,
        'upload_result': upload_result,
        'processed_chunks': processed_chunks,
        'update_summary': {
            'total': len(processed_chunks),
            'success': success_count,
            'failed': len(failed_chunks),
            'failed_chunks': failed_chunks
        }
    }


@app.route('/upload_file', methods=['POST'])
def upload_file():
    """Receive uploaded PDF file streams and run the image pipeline on each.

    Form fields: dataset_id, document_id; files under 'file'; RAGFlow API key
    in the Authorization header ('Bearer <key>'). Returns per-file results.
    """
    try:
        dataset_id = request.form.get('dataset_id')
        document_id = request.form.get('document_id')
        if not dataset_id or not document_id:
            return jsonify({'error': 'Missing dataset_id or document_id'}), 400

        # Fail fast if the service is misconfigured.
        if not all([
            Config.INPUT_DIR,
            Config.OUTPUT_DIR,
            Config.MINIO_CONFIG.get('bucket_name'),
            Config.API_CONFIG.get('image_address_url'),
            Config.RAGFLOW_CONFIG.get('address')
        ]):
            return jsonify({'error': '配置不完整'}), 500

        auth_header = request.headers.get('Authorization')
        if not auth_header or not auth_header.startswith('Bearer '):
            return jsonify({'error': 'Missing or invalid Authorization header'}), 401

        files = request.files.getlist('file')
        if not files:
            return jsonify({'error': 'No files provided'}), 400

        # Forward the caller's bearer token to RAGFlow.
        processor = PDFImageProcessor(api_key=auth_header.split(' ')[1])

        results = [
            _process_single_pdf(processor, file, dataset_id, document_id)
            for file in files
        ]

        return jsonify({
            'success': True,
            'message': '所有文件处理完成',
            'results': results
        })

    except Exception as e:
        logger.error(f"处理文件时发生错误: {str(e)}")
        logger.error(traceback.format_exc())
        return jsonify({
            'error': '处理文件时发生错误',
            'details': str(e)
        }), 500


if __name__ == '__main__':
    # Dev entry point: bind to all interfaces so containerized clients can
    # reach the service; debug mode follows the project configuration.
    app.run(host='0.0.0.0', port=18005, debug=Config.DEBUG)
