import os
import re
import json
import csv
import time
from multiprocessing import Pool, cpu_count, Lock
from PyPDF2 import PdfReader

# --- Configuration ---
PDF_FOLDER = "./pdf_files"  # directory scanned for input PDFs
OUTPUT_DIR = "./out"        # root directory for all outputs
TEXT_DIR = os.path.join(OUTPUT_DIR, "text_contents")  # per-PDF full-text .txt files
os.makedirs(OUTPUT_DIR, exist_ok=True)  # created eagerly at import time
os.makedirs(TEXT_DIR, exist_ok=True)    # ditto for the text subdirectory
OUTPUT_CSV = os.path.join(OUTPUT_DIR, "pdf_extracted_info.csv")  # summary CSV (one row per PDF with links)
OUTPUT_JSON = os.path.join(OUTPUT_DIR, "pdf_extracted_info.json")  # summary JSON array
MAX_WORKERS = cpu_count()  # one worker process per CPU core

# Locks guarding the shared output files across pool workers.
# NOTE(review): these are module-level multiprocessing.Lock objects. Under the
# "fork" start method (Linux default) pool workers inherit them and writes are
# serialized; under "spawn" (Windows, macOS 3.8+ default) each worker re-imports
# this module and creates its OWN locks, so cross-process exclusion is NOT
# guaranteed — confirm the target platform, or pass a shared lock via a Pool
# initializer instead.
csv_lock = Lock()
json_lock = Lock()
text_lock = Lock()

def init_output_files():
    """Create (truncate) the output files: CSV with a header row, JSON as an empty array."""
    with csv_lock:
        with open(OUTPUT_CSV, 'w', newline='', encoding='utf-8') as fh:
            csv.DictWriter(fh, fieldnames=['file_name', 'title', 'github_links']).writeheader()

    with json_lock:
        with open(OUTPUT_JSON, 'w', encoding='utf-8') as fh:
            json.dump([], fh)

def extract_full_text(pdf_path):
    """Return the text of every page joined with newlines, or None on failure.

    Pages for which PyPDF2 yields no text (empty/None) are skipped.
    """
    try:
        with open(pdf_path, 'rb') as fh:
            page_texts = [page.extract_text() for page in PdfReader(fh).pages]
        return "\n".join(text for text in page_texts if text)
    except Exception as e:
        print(f"提取全文出错({pdf_path}): {str(e)}")
        return None

def save_full_text(pdf_name, text):
    """Write *text* to TEXT_DIR/<pdf stem>.txt under the text lock.

    Returns True on success, False for empty text or a write failure.
    """
    if not text:
        return False

    stem, _ = os.path.splitext(pdf_name)
    txt_path = os.path.join(TEXT_DIR, stem + ".txt")

    with text_lock:
        try:
            with open(txt_path, 'w', encoding='utf-8') as fh:
                fh.write(text)
        except Exception as e:
            print(f"保存全文出错({txt_path}): {str(e)}")
            return False
        return True

def extract_largest_font_text(pdf_path):
    """Heuristic title extraction: first non-empty line of the first page.

    NOTE: despite the name, PyPDF2's extract_text() exposes no font-size
    information, so this cannot actually locate the largest-font text; the
    first non-blank line of page 1 is used as a proxy for the title.

    Falls back to the PDF's file name when the page has no usable text or
    any error occurs (e.g. an empty or unreadable PDF).
    """
    try:
        with open(pdf_path, 'rb') as file:
            reader = PdfReader(file)
            first_page = reader.pages[0]
            content = first_page.extract_text()

        if content:
            # Skip leading blank lines so a whitespace-only first line
            # does not produce an empty title.
            for line in content.split('\n'):
                stripped = line.strip()
                if stripped:
                    return stripped
        return os.path.basename(pdf_path)
    except Exception as e:
        print(f"提取标题出错({pdf_path}): {str(e)}")
        return os.path.basename(pdf_path)

def extract_pdf_info(pdf_path):
    """Extract title, GitHub links, and (conditionally) full text from one PDF.

    Single pass over the document: page text is collected while scanning for
    links, so the PDF is no longer read a second time when links are found
    (the original implementation re-parsed the whole file via
    extract_full_text). 'full_text' is only populated when at least one
    GitHub link was matched, mirroring the original behavior.

    Returns a dict with keys: file_name, title, github_links (list),
    full_text (str or None).
    """
    title = extract_largest_font_text(pdf_path)
    github_links = set()
    full_text = None

    # Compile once, outside the per-page loop. Matches github.com repo URLs
    # with optional scheme/www prefix and an optional deeper path.
    link_re = re.compile(
        r'(?:https?://)?(?:www\.)?github\.com/[\w\-\.]+/[\w\-\.]+(?:/[\w\-\.]+)*/?'
    )

    page_texts = []
    try:
        with open(pdf_path, 'rb') as file:
            reader = PdfReader(file)
            for page in reader.pages:
                page_text = page.extract_text()
                if page_text:
                    page_texts.append(page_text)
                    github_links.update(link_re.findall(page_text))

        # Only keep the full text for PDFs that actually contain links.
        if github_links:
            full_text = "\n".join(page_texts)
    except Exception as e:
        print(f"提取内容出错({pdf_path}): {str(e)}")

    return {
        'file_name': os.path.basename(pdf_path),
        'title': title,
        'github_links': list(github_links),
        'full_text': full_text
    }

def safe_append_to_csv(result):
    """Append one row to the shared CSV under csv_lock.

    No-op when the result carries no GitHub links. Links are flattened into
    a single ' | '-separated cell; the full text is never written to CSV.
    """
    if not result['github_links']:
        return

    row = {
        'file_name': result['file_name'],
        'title': result['title'],
        'github_links': ' | '.join(result['github_links']),
    }

    with csv_lock:
        try:
            with open(OUTPUT_CSV, 'a', newline='', encoding='utf-8') as fh:
                writer = csv.DictWriter(fh, fieldnames=['file_name', 'title', 'github_links'])
                writer.writerow(row)
        except Exception as e:
            print(f"写入CSV出错: {str(e)}")

def safe_append_to_json(result):
    """Append one record (without 'full_text') to the JSON array on disk.

    No-op when the result carries no GitHub links.

    NOTE: each call re-reads and rewrites the whole file so it remains a
    valid JSON array at all times — O(n^2) over a run, acceptable for
    modest result counts.
    """
    if not result['github_links']:
        return

    # Drop the (potentially large) full text before persisting.
    record = {key: value for key, value in result.items() if key != 'full_text'}

    with json_lock:
        try:
            records = []
            if os.path.exists(OUTPUT_JSON):
                with open(OUTPUT_JSON, 'r', encoding='utf-8') as fh:
                    records = json.load(fh)

            records.append(record)

            with open(OUTPUT_JSON, 'w', encoding='utf-8') as fh:
                json.dump(records, fh, ensure_ascii=False, indent=4)
        except Exception as e:
            print(f"写入JSON出错: {str(e)}")

def process_single_file(pdf_path):
    """Pool-worker entry point: extract, persist, and time one PDF.

    Returns a progress record consumed by print_progress: file name,
    status string, elapsed time, the raw extraction result, whether links
    were found, and whether the full text was written to disk.
    """
    started = time.time()
    filename = os.path.basename(pdf_path)
    info = None
    text_saved = False
    status = "成功"

    try:
        info = extract_pdf_info(pdf_path)
        if not info['github_links']:
            status = "成功(无GitHub链接)"
        else:
            safe_append_to_csv(info)
            safe_append_to_json(info)
            if info['full_text']:
                text_saved = save_full_text(filename, info['full_text'])
            status = "成功(有GitHub链接)"
    except Exception as e:
        status = f"失败: {str(e)}"

    return {
        'file': filename,
        'status': status,
        'time': f"{time.time() - started:.2f}秒",
        'result': info,
        'has_links': bool(info['github_links']) if info else False,
        'text_saved': text_saved,
    }

def print_progress(result, processed, total):
    """Print a one-line, ANSI-colored status for a finished file.

    Green for any success status, red otherwise; appends blue/yellow tags
    when links were found / full text was saved. For files with links, also
    prints the title and up to three of the links.
    """
    color = "\033[32m" if "成功" in result['status'] else "\033[31m"
    tags = ""
    if result['has_links']:
        tags += " \033[34m[有链接]\033[0m"
    if result.get('text_saved'):
        tags += " \033[33m[已保存全文]\033[0m"

    print(f"{color}[{processed}/{total}] {result['file']} - {result['status']} - 耗时: {result['time']}{tags}\033[0m")

    if result['result'] and result['has_links']:
        links = result['result']['github_links']
        print(f"  标题: {result['result']['title']}")
        ellipsis = "..." if len(links) > 3 else ""
        print(f"  链接: {', '.join(links[:3])}" + ellipsis)

def process_pdf_folder(folder_path):
    """Process every PDF in *folder_path* with a process pool and print a summary.

    Initializes the output files, fans the PDFs out to MAX_WORKERS worker
    processes, streams per-file progress as results complete (unordered),
    and finishes with aggregate counts. Returns False when the folder is
    missing or contains no PDFs, True otherwise.
    """
    if not os.path.isdir(folder_path):
        print(f"\033[31m错误: PDF文件夹 '{folder_path}' 不存在\033[0m")
        return False

    init_output_files()

    # Collect *.pdf entries (case-insensitive extension match).
    pdf_files = [
        os.path.join(folder_path, name)
        for name in os.listdir(folder_path)
        if name.lower().endswith('.pdf')
    ]
    if not pdf_files:
        print("\033[33m没有找到PDF文件\033[0m")
        return False

    total_files = len(pdf_files)
    print(f"\n开始处理 {total_files} 个PDF文件, 使用 {MAX_WORKERS} 个工作进程...")
    print(f"输出文件将保存在: {os.path.abspath(OUTPUT_DIR)}")
    print(f"全文内容将保存在: {os.path.abspath(TEXT_DIR)}\n")

    started = time.time()
    success_count = link_count = text_count = 0

    with Pool(processes=MAX_WORKERS) as pool:
        # imap_unordered yields results as workers finish, keeping the
        # progress display responsive.
        for done, outcome in enumerate(pool.imap_unordered(process_single_file, pdf_files), 1):
            print_progress(outcome, done, total_files)

            if "成功" not in outcome['status']:
                continue
            success_count += 1
            if outcome['has_links']:
                link_count += 1
                if outcome.get('text_saved'):
                    text_count += 1

    elapsed = time.time() - started
    print(f"\n\033[1m处理完成!\033[0m")
    print(f"总文件数: {total_files}")
    print(f"成功处理: {success_count}")
    print(f"包含GitHub链接的文件: {link_count}")
    print(f"已保存全文的PDF: {text_count}")
    print(f"总耗时: {elapsed:.2f}秒")

    if link_count > 0:
        print(f"\n结果已保存到:")
        print(f"- CSV文件: {os.path.abspath(OUTPUT_CSV)}")
        print(f"- JSON文件: {os.path.abspath(OUTPUT_JSON)}")
        print(f"- 全文TXT文件: {os.path.abspath(TEXT_DIR)}")
    else:
        print("\n没有找到包含GitHub链接的PDF文件")

    return True

if __name__ == "__main__":
    # Clear the terminal before showing the banner.
    os.system('cls' if os.name == 'nt' else 'clear')
    banner = [
        "\033[1mPDF GitHub链接提取工具（增强版）\033[0m",
        "=" * 50,
        "功能说明:",
        "- 提取PDF中的GitHub链接",
        "- 自动识别文档标题",
        "- 为包含GitHub链接的PDF导出全文内容",
        "=" * 50,
    ]
    print("\n".join(banner))

    ok = process_pdf_folder(PDF_FOLDER)
    print("\n操作完成。" if ok else "\n处理过程中出现问题，请检查错误信息。")