import os
import re
import json
import pandas as pd
from PyPDF2 import PdfReader
from multiprocessing import Pool, Lock, Manager,cpu_count
from typing import List, Dict, Optional, Union
import fitz  # PyMuPDF
import traceback
from tqdm import tqdm  # 进度条库

# Configuration
KEYWORDS = ['Code Availibility', 'AVAILABILITY']  # a paragraph qualifies if it matches ANY of these
# NOTE(review): 'Availibility' is misspelled — possibly deliberate, to catch a
# common typo in source PDFs; confirm before "correcting" it.
DEFAULT_OUTPUT_FOLDER = "output_results"
DEFAULT_INPUT_FOLDER = "pdf_files"
# Default worker count: one less than the CPU count (leave one core free).
# NOTE(review): evaluates to 0 on a single-core machine, which Pool() rejects.
DEFAULT_PROCESSES = cpu_count()-1  # 默认进程数等于CPU核心数

def normalize_paragraphs(text: str) -> str:
    """Normalize the formatting of keyword-extracted paragraphs.

    Steps:
    1. Strip spaces out of any matched URL.
    2. Split the text into paragraphs at lines beginning with the
       '[通过关键词:' marker.
    3. Collapse each paragraph's internal newlines into single spaces.
    4. Insert a newline right after the first ']: ' marker of each
       paragraph and separate paragraphs with a blank line.

    Empty input is returned unchanged.
    """
    if not text:
        return text

    # NOTE(review): this pattern contains no whitespace class, so a matched
    # URL can never actually contain a space — the replace below appears to
    # be a no-op for line-wrapped links; confirm the intended behavior.
    url_re = re.compile(
        r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    )
    text = url_re.sub(lambda m: m.group().replace(' ', ''), text)

    # Group lines into paragraphs: a line starting with the keyword marker
    # opens a new group (except at the very beginning of the text).
    groups = []
    for line in text.split('\n'):
        if groups and line.startswith('[通过关键词:'):
            groups.append([line])
        elif groups:
            groups[-1].append(line)
        else:
            groups = [[line]]

    # Render each paragraph: collapse inner whitespace, break the line after
    # ']: ', and prefix every paragraph but the first with a blank line.
    rendered = []
    for index, group in enumerate(groups):
        body = ' '.join(piece.strip() for piece in group if piece.strip())
        if ']: ' in body:
            head, _, tail = body.partition(']: ')
            body = head + ']: \n' + tail
        rendered.append(body if index == 0 else '\n\n' + body)

    return ''.join(rendered)

def extract_links_from_normalized_text(text: str) -> List[str]:
    """Return every http(s) URL found in *text*.

    All whitespace runs (including newlines) are first collapsed to single
    spaces before matching.
    NOTE(review): a newline inside a URL becomes a space, which still
    terminates the match — this does not actually rejoin wrapped links;
    confirm whether that was the intent.
    """
    collapsed = ' '.join(text.split())
    return re.findall(
        r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
        collapsed,
    )

def get_safe_json_data(output_folder: str) -> List[Dict]:
    """Load results.json from *output_folder*.

    Returns an empty list when the file is missing, corrupted, or
    unreadable, printing a warning in the latter two cases.
    """
    json_path = os.path.join(output_folder, "results.json")
    if not os.path.exists(json_path):
        return []

    try:
        with open(json_path, 'r', encoding='utf-8') as handle:
            return json.load(handle)
    except json.JSONDecodeError:
        print("Warning: JSON file corrupted, creating new one")
    except Exception as exc:
        print(f"Warning: Error reading JSON file: {str(exc)}")
    return []

def save_safe_json_data(output_folder: str, data: List[Dict]) -> None:
    """Atomically write *data* to results.json in *output_folder*.

    The data is first written to a temporary file and then moved into place
    with os.replace(), which is an atomic rename on both POSIX and Windows,
    so readers never observe a partially written or missing file.

    Errors are printed (with traceback) rather than raised, matching this
    module's best-effort error handling.
    """
    json_path = os.path.join(output_folder, "results.json")
    try:
        # Write to a sibling temp file first, then swap it into place.
        temp_path = json_path + ".tmp"
        with open(temp_path, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2)

        # Bug fix: os.replace is a single atomic rename.  The previous
        # remove-then-rename sequence was not atomic — a crash between the
        # two calls would leave no results file at all.
        os.replace(temp_path, json_path)
    except Exception as e:
        print(f"Error saving JSON file: {str(e)}")
        traceback.print_exc()

def extract_largest_text(pdf_path: str) -> Optional[str]:
    """Return the text of the largest-font block on page 1 (title heuristic).

    Returns None when the PDF is empty, has no text blocks, or any error
    occurs (exceptions are deliberately swallowed so a bad PDF cannot kill
    a worker process).
    """
    doc = None
    try:
        doc = fitz.open(pdf_path)
        if len(doc) == 0:
            return None

        page = doc[0]
        blocks = page.get_text("dict")["blocks"]
        if not blocks:
            return None

        # Pick the block whose FIRST span of its FIRST line has the largest
        # font size.  NOTE(review): later spans/lines are not inspected, so
        # a block whose large text starts further in can be missed — confirm
        # this heuristic is acceptable.
        largest_block = max(
            (b for b in blocks if "lines" in b),
            key=lambda b: b.get("lines", [{}])[0].get("spans", [{}])[0].get("size", 0),
            default=None,
        )

        if largest_block:
            text = " ".join(
                span["text"]
                for line in largest_block.get("lines", [])
                for span in line.get("spans", [])
            )
            return text.strip()
        return None
    except Exception:
        return None
    finally:
        # Bug fix: the original never closed the document, leaking a file
        # handle per call (one per PDF across the worker processes).
        if doc is not None:
            doc.close()

def contains_any_keyword(text: str, keywords: List[str]) -> bool:
    """Return True when *text* contains at least one of *keywords*.

    Matching is literal (keywords are regex-escaped) and case-insensitive.
    An empty keyword list yields False.
    """
    return any(
        re.search(re.escape(keyword), text, re.IGNORECASE) is not None
        for keyword in keywords
    )

def extract_paragraphs_after_any_keyword(pdf_path: str, keywords: List[str]) -> Optional[str]:
    """
    Extract the paragraph that follows every keyword occurrence in the PDF.

    The full document text is scanned for each keyword (literal,
    case-insensitive).  For every hit, the text after the keyword up to the
    first "period + optional whitespace + newline" terminator is taken as
    that keyword's paragraph.  Each paragraph is emitted as a
    "[通过关键词: <matched keyword>]: <paragraph>" line; all lines are
    joined with newlines.

    Returns None when no keyword occurs — and also on ANY extraction error
    (the broad except below deliberately swallows everything so a corrupt
    PDF cannot kill a worker process).
    """
    try:
        reader = PdfReader(pdf_path)
        full_text = ""
        
        # Concatenate every page's text, one page per line group.
        for page in reader.pages:
            full_text += page.extract_text() + "\n"
        
        # Cheap pre-check before locating individual matches.
        if not contains_any_keyword(full_text, keywords):
            return None
        
        # Collect every match of every keyword.
        keyword_matches = []
        for keyword in keywords:
            keyword_matches.extend(
                m for m in re.finditer(re.escape(keyword), full_text, re.IGNORECASE)
            )
        
        if not keyword_matches:
            return None
        
        # Process matches in document order.
        keyword_matches.sort(key=lambda m: m.start())
        
        extracted_paragraphs = []
        
        for match in keyword_matches:
            text_after_keyword = full_text[match.end():]
            
            # Terminator: a period, optional whitespace, then a newline.
            paragraph_match = re.search(r'\.\s*\n', text_after_keyword)
            
            if paragraph_match:
                # Slice up to and including the terminating period.
                end_pos = paragraph_match.start()
                next_paragraph = text_after_keyword[:end_pos + 1].strip()  # includes the period
            else:
                # No terminator: take the rest of the document and make sure
                # it ends with a period.
                next_paragraph = text_after_keyword.strip()
                if next_paragraph and not next_paragraph.endswith('.'):
                    next_paragraph += '.'
            
            # Tag the paragraph with the keyword exactly as matched; this
            # marker is what normalize_paragraphs() later splits on.
            matched_keyword = match.group()
            extracted_paragraphs.append(f"[通过关键词: {matched_keyword}]: {next_paragraph}")
        
        # One tagged paragraph per keyword hit.
        combined_text = "\n".join(extracted_paragraphs)
        
        return combined_text
    except Exception:
        return None

def process_single_pdf(args: tuple) -> Dict:
    """
    Process one PDF file (multiprocessing worker entry point).

    *args* is an (input_folder, output_folder, filename) tuple; any extra
    elements are ignored.  Returns a status dict:
      - {"status": "skipped", "reason": ...} when no keyword paragraph
        qualifies;
      - {"status": "processed", ...} with title/links/keywords on success
        (also writes the normalized text to <output_folder>/<stem>.txt);
      - {"status": "error", ...} with the traceback on any exception.
    """
    input_folder, output_folder, filename = args[:3]
    pdf_path = os.path.join(input_folder, filename)
    
    try:
        # Raw extraction: paragraphs following any configured keyword.
        raw_extracted_text = extract_paragraphs_after_any_keyword(pdf_path, KEYWORDS)
        if not raw_extracted_text:
            return {"filename": filename, "status": "skipped", "reason": "no keywords found"}
        # Skip when the extracted text is shorter than 20 characters.
        if len(raw_extracted_text) < 20:
            return {"filename": filename, "status": "skipped", "reason": "text too short"}
        # Skip statements that explicitly say "Not applicable".
        if 'Not applicable' in raw_extracted_text:
            return {"filename": filename, "status": "skipped", "reason": "Not applicable"}
        # Normalize the formatting first...
        normalized_text = normalize_paragraphs(raw_extracted_text)
        # ...then extract links from the normalized text.
        links = extract_links_from_normalized_text(normalized_text)
        
        # Largest-font text on page 1 serves as the title.
        title = extract_largest_text(pdf_path)
        
        # Persist the normalized text alongside the results files.
        txt_filename = os.path.splitext(filename)[0] + ".txt"
        txt_path = os.path.join(output_folder, txt_filename)
        with open(txt_path, 'w', encoding='utf-8') as f:
            f.write(normalized_text)
        
        # Record which configured keywords actually occur in the output.
        matched_keywords = []
        for keyword in KEYWORDS:
            if re.search(re.escape(keyword), normalized_text, re.IGNORECASE):
                matched_keywords.append(keyword)
        
        # Assemble the per-file result record.
        return {
            "filename": filename,
            "title": title,
            "links": links,
            "keywords_found": matched_keywords,
            "status": "processed",
            "paragraph_count": len(normalized_text.split("\n\n")),
            "link_count": len(links)
        }
        
    except Exception as e:
        return {
            "filename": filename,
            "status": "error",
            "error": str(e),
            "traceback": traceback.format_exc()
        }

def update_results(output_folder: str, csv_path: str, results: List[Dict]) -> None:
    """
    Persist *results* to both results.json and the CSV at *csv_path*.

    The JSON keeps every field of every record; the CSV keeps only the
    filename/title/links/keywords_found columns (when present), with list
    fields flattened to comma-separated strings.  Errors are printed, not
    raised, so a checkpoint failure never aborts the run.
    """
    try:
        # JSON first: full records, written atomically by the helper.
        save_safe_json_data(output_folder, results)
        
        df = pd.DataFrame(results)
        
        # Flatten the links list into a comma-separated string.
        if 'links' in df.columns:
            df['links'] = df['links'].apply(lambda x: ', '.join(x) if isinstance(x, list) else '')
        
        # Flatten the keywords_found list the same way.
        if 'keywords_found' in df.columns:
            df['keywords_found'] = df['keywords_found'].apply(lambda x: ', '.join(x) if isinstance(x, list) else '')
        
        # The CSV carries only these columns.
        required_columns = ['filename', 'title', 'links', 'keywords_found']
        existing_columns = [col for col in required_columns if col in df.columns]
        df = df[existing_columns]
        
        # Write to a temp file, then swap it into place.
        temp_path = csv_path + ".tmp"
        df.to_csv(temp_path, index=False, encoding='utf-8')
        
        # Bug fix: os.replace is an atomic rename.  The previous
        # remove-then-rename pair was not atomic — a crash between the two
        # calls would leave the CSV missing entirely.
        os.replace(temp_path, csv_path)
    except Exception as e:
        print(f"Error updating result files: {str(e)}")
        traceback.print_exc()

def process_pdf_folder(
    input_folder: str = DEFAULT_INPUT_FOLDER,
    output_folder: str = DEFAULT_OUTPUT_FOLDER,
    num_processes: int = DEFAULT_PROCESSES
) -> None:
    """
    Process every PDF in *input_folder* with a pool of worker processes.

    Results accumulate in <output_folder>/results.json and results.csv.
    Files already recorded in results.json are skipped, so an interrupted
    run can be resumed.  Progress is shown with tqdm; a summary (including
    error tracebacks) is printed at the end.
    """
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    
    print(f"Processing PDFs with keywords (any of): {KEYWORDS}")
    
    # Create the CSV with its header on first run.
    csv_path = os.path.join(output_folder, "results.csv")
    if not os.path.exists(csv_path):
        pd.DataFrame(columns=["filename", "title", "links", "keywords_found"]).to_csv(
            csv_path, index=False
        )
    
    # Create an empty JSON results file on first run.
    json_path = os.path.join(output_folder, "results.json")
    if not os.path.exists(json_path):
        save_safe_json_data(output_folder, [])
    
    # Every PDF in the input folder (case-insensitive extension match).
    all_pdf_files = [
        filename for filename in os.listdir(input_folder) 
        if filename.lower().endswith('.pdf')
    ]
    
    # Resume support: skip files that already have a result record.
    existing_results = get_safe_json_data(output_folder)
    processed_files = {result["filename"] for result in existing_results}
    
    # Task list covers only the files not yet processed.
    tasks = [
        (input_folder, output_folder, filename)
        for filename in all_pdf_files
        if filename not in processed_files
    ]
    
    total_files = len(all_pdf_files)
    processed_count = len(processed_files)
    remaining_count = len(tasks)
    
    print(f"Found {total_files} PDF files total")
    print(f"Already processed: {processed_count}")
    print(f"Remaining to process: {remaining_count}")
    
    if not tasks:
        print("No files to process")
        return
    
    # Fan the tasks out across the worker pool.
    results = existing_results.copy()
    with Pool(processes=num_processes) as pool:
        # tqdm renders a progress bar over the lazily-yielded results.
        for result in tqdm(pool.imap(process_single_pdf, tasks), total=len(tasks), desc="Processing PDFs"):
            results.append(result)
            
            # Checkpoint every 10 results so a crash loses little work.
            if len(results) % 10 == 0:
                update_results(output_folder, csv_path, results)
    
    # Bug fix: always flush the final state.  The old trailing condition
    # (len(results) == len(all_pdf_files)) could skip the last partial batch
    # when results.json contained records for files no longer in the folder.
    update_results(output_folder, csv_path, results)
    
    # Summary statistics.
    success_count = sum(1 for r in results if r.get("status") == "processed")
    skipped_count = sum(1 for r in results if r.get("status") == "skipped")
    error_count = sum(1 for r in results if r.get("status") == "error")
    
    print("\nProcessing completed!")
    print(f"Total files: {total_files}")
    print(f"Successfully processed: {success_count}")
    print(f"Skipped (no keywords): {skipped_count}")
    print(f"Errors: {error_count}")
    
    if error_count > 0:
        print("\nError details:")
        for result in results:
            if result.get("status") == "error":
                print(f"\nFile: {result['filename']}")
                print(f"Error: {result['error']}")
                print("Traceback:")
                print(result['traceback'])

if __name__ == "__main__":
    # Run the pipeline over the default input folder with default settings.
    process_pdf_folder()