import PyPDF2
import re
import nltk
import os
import sys
from nltk.corpus import words, stopwords
from nltk.tokenize import word_tokenize
from tqdm import tqdm

# Create a dedicated NLTK data directory one level above this file's directory.
nltk_data_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'nltk_data')
os.makedirs(nltk_data_dir, exist_ok=True)

# Prepend to NLTK's search path so this project-local directory is checked first.
nltk.data.path.insert(0, nltk_data_dir)

class DownloadProgressBar(nltk.downloader.Downloader):
    """Custom NLTK downloader that shows a tqdm progress bar while downloading.

    NOTE(review): this class leans on non-public ``Downloader`` internals
    (``self._download_dir``, ``self._get_info``, ``self._download_package``).
    These are not part of NLTK's documented API — verify they exist, with
    these signatures, in the pinned nltk version before relying on this class.
    """
    def download(self, info_or_id, download_dir=None, quiet=False):
        # Fall back to the downloader's configured directory.
        # NOTE(review): the `quiet` parameter is accepted but never used.
        if download_dir is None:
            download_dir = self._download_dir
        
        # Skip the download entirely if the package is already installed.
        if self.is_installed(info_or_id, download_dir):
            print(f"'{info_or_id}' 已下载在 '{download_dir}'")
            return True
        
        # Resolve the package id to its download metadata (None if unknown).
        info = self._get_info(info_or_id)
        if info is None:
            return False
        
        print(f"正在下载 '{info.id}'...")
        
        # Run the actual download inside a tqdm progress bar context.
        def _download_with_progress():
            # urlretrieve-style reporthook: advance the bar by one block per call.
            # NOTE(review): adding `block_size` every call can overshoot
            # `total_size` on the final (partial) block — cosmetic only.
            def _reporthook(block_num, block_size, total_size):
                if total_size > 0:
                    progress_bar.update(block_size)
            
            total_size = info.size
            with tqdm(total=total_size, unit='B', unit_scale=True, desc=info.id) as progress_bar:
                return self._download_package(info, download_dir, _reporthook)
        
        return _download_with_progress()

# Initialize the custom progress-bar downloader.
# NOTE(review): `nltk_downloader` is not referenced anywhere else in this file
# (downloads below call nltk.download() directly) — confirm other modules use
# it before removing.
nltk_downloader = DownloadProgressBar()

# 下载NLTK数据（仅在需要时）
def ensure_nltk_data():
    """Ensure the NLTK datasets this module needs are present and loadable.

    Downloads 'punkt', 'words' and 'stopwords' into ``nltk_data_dir`` when
    missing, then verifies each corpus can actually be loaded (not merely
    that files exist on disk). All errors are reported and swallowed so
    callers can proceed on a best-effort basis.
    """
    # Map each dataset id to the resource path nltk.data.find() expects.
    # BUG FIX: the original called `if not nltk.data.find(dataset)`, but
    # find() raises LookupError when a resource is missing (it never
    # returns a falsy value), so the download branch could never run and
    # the first missing dataset aborted the whole loop.  find() also needs
    # the category prefix ('tokenizers/...', 'corpora/...') to resolve.
    datasets = {
        'punkt': 'tokenizers/punkt',
        'words': 'corpora/words',
        'stopwords': 'corpora/stopwords',
    }
    try:
        # Make sure the target directories exist before downloading.
        os.makedirs(os.path.join(nltk_data_dir, 'corpora'), exist_ok=True)
        os.makedirs(os.path.join(nltk_data_dir, 'tokenizers'), exist_ok=True)

        # Download each dataset only if it cannot currently be located.
        for dataset, resource in datasets.items():
            try:
                nltk.data.find(resource)
            except LookupError:
                print(f"正在下载 '{dataset}'...")
                nltk.download(dataset, download_dir=nltk_data_dir, quiet=False)

        # Verify the data is actually usable, not merely present on disk.
        try:
            words.words()
            stopwords.words('english')
            word_tokenize("Test sentence.")
            print("NLTK数据已成功加载")
        except Exception as e:
            print(f"NLTK数据加载失败: {str(e)}")
            # Last resort: force a re-download of every dataset.
            for dataset in datasets:
                nltk.download(dataset, download_dir=nltk_data_dir, quiet=False, force=True)
    except Exception as e:
        # Deliberate best-effort contract: report but never raise.
        print(f"NLTK数据确认过程出错: {str(e)}")

def extract_words(pdf_path, user_vocab=None):
    """
    Extract unfamiliar English words from a PDF file.

    Args:
        pdf_path: Path to the PDF file.
        user_vocab: Set of words already in the user's vocabulary;
            ``None`` is treated as an empty set.

    Returns:
        list: Sorted, de-duplicated list of lowercase words that appear in
        the PDF, are real English dictionary words, and are neither
        stopwords nor in ``user_vocab``. An empty list is returned on any
        error, so callers never need to handle exceptions here.
    """
    try:
        # Make sure the NLTK corpora are available before touching them.
        ensure_nltk_data()

        # Load the vocabulary / stopword lists, guarding against a missing
        # or partially-downloaded corpus (LookupError/IndexError).
        try:
            english_words_list = words.words()
            if not english_words_list:
                raise ValueError("NLTK words vocabulary is empty")
            english_stopwords_list = stopwords.words('english')
            if not english_stopwords_list:
                raise ValueError("NLTK stopwords list is empty")
        except (LookupError, IndexError) as e:
            print(f"NLTK data error: {str(e)}")
            # FIX: force the re-download into the module's configured data
            # directory — the original omitted download_dir here, sending
            # data to NLTK's default location instead of nltk_data_dir.
            nltk.download('words', download_dir=nltk_data_dir, quiet=False, force=True)
            nltk.download('stopwords', download_dir=nltk_data_dir, quiet=False, force=True)
            english_words_list = words.words()
            english_stopwords_list = stopwords.words('english')

        # Pull the text out of every page; extract_text() may return None.
        with open(pdf_path, 'rb') as file:
            reader = PyPDF2.PdfReader(file)
            if not reader.pages:
                return []  # PDF has no pages
            # Collect per-page text and join once — avoids the quadratic
            # cost of repeated `text += page_text` on large PDFs.
            page_texts = [page.extract_text() or "" for page in reader.pages]
        text = "".join(page_texts)

        if not text.strip():
            return []  # nothing extractable

        # Tokenize the lowercased text.
        tokens = word_tokenize(text.lower())
        if not tokens:
            return []

        # Build sets for O(1) membership tests in the filter below.
        english_stopwords = set(english_stopwords_list)
        english_vocab = {w.lower() for w in english_words_list}
        if not english_vocab:
            print("警告: 英语词汇表为空")
            return []

        if user_vocab is None:
            user_vocab = set()

        # Keep alphabetic tokens that are real English words but are
        # neither stopwords nor already known to the user.
        unfamiliar_words = [
            word for word in tokens
            if word and word.isalpha()
            and word not in english_stopwords
            and word in english_vocab
            and word not in user_vocab
        ]

        # De-duplicate and sort for a stable, repeatable result.
        return sorted(set(unfamiliar_words))

    except Exception as e:
        # Deliberate best-effort contract: report and return an empty list.
        print(f"extract_words error: {str(e)}")
        return []