import os
import zipfile
import re
import pandas as pd
from collections import OrderedDict
from bs4 import BeautifulSoup
import argparse
from typing import List, Dict, Tuple, Set
import logging
import json
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
import multiprocessing

# Configure logging: timestamped INFO-level messages for the whole script.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class EPUBNewBookAnalyzer:
    """Find "new" (unknown or low-frequency) words in an EPUB book.

    The analyzer loads a known-word frequency table from a JSON file, then
    scans the HTML/XHTML documents inside an EPUB archive.  A word is
    reported as new when it is absent from the known-word set, or when its
    known frequency falls below ``frequency_threshold``.  Each new word is
    exported to an Excel workbook together with an example sentence taken
    from the surrounding text.
    """

    # Column order and Chinese header names shared by every exported sheet.
    _EXPORT_COLUMNS = ['word', 'known_frequency', 'new_book_frequency', 'is_known_word',
                       'is_low_frequency', 'word_position', 'total_words_in_file',
                       'position_percentage', 'example_sentence', 'source_file']
    _EXPORT_HEADERS = {
        'word': '单词',
        'known_frequency': '已知语料频率(%)',
        'new_book_frequency': '新书频率(%)',
        'is_known_word': '是否已知单词',
        'is_low_frequency': '是否低频单词',
        'word_position': '单词位置',
        'total_words_in_file': '文件总单词数',
        'position_percentage': '位置百分比(%)',
        'example_sentence': '例句',
        'source_file': '源文件'
    }

    def __init__(self, known_words_file: str = "known_words.json",
                 context_words: int = 12,
                 frequency_threshold: float = 0.00001,
                 enable_cross_file_deduplication: bool = True):
        """
        Args:
            known_words_file: JSON file mapping known words to frequencies.
            context_words: words of context kept on each side of a target
                word in example sentences.
            frequency_threshold: known words with a frequency below this
                value are still treated as new words.
            enable_cross_file_deduplication: when True, a word reported in
                one file is not reported again in later files.
        """
        self.known_words: Set[str] = set()
        self.word_frequencies: Dict[str, float] = {}
        self.context_words = context_words
        self.frequency_threshold = frequency_threshold
        self.enable_cross_file_deduplication = enable_cross_file_deduplication
        self.load_known_words(known_words_file)

    def load_known_words(self, known_words_file: str):
        """Load the known-word frequency table from ``known_words_file``.

        Raises:
            FileNotFoundError: if the file does not exist.
        """
        try:
            if os.path.exists(known_words_file):
                with open(known_words_file, 'r', encoding='utf-8') as f:
                    known_data = json.load(f)
                    self.known_words = set(known_data.keys())
                    self.word_frequencies = known_data
                logger.info(f"从 {known_words_file} 加载了 {len(self.known_words)} 个已知单词")
            else:
                logger.error(f"已知单词文件不存在: {known_words_file}")
                raise FileNotFoundError(f"已知单词文件不存在: {known_words_file}")
        except Exception as e:
            logger.error(f"加载已知单词文件时出错: {e}")
            raise

    def _select_html_files(self, html_files: List[str], target_files: List[str]) -> List[str]:
        """Resolve ``target_files`` against the archive's HTML file list.

        Matching is tried first by exact archive path / basename, then by a
        case-insensitive substring match on basenames.  Returns the matched
        files, deduplicated in request order; falls back to all files when
        nothing matches.
        """
        file_mapping = {}
        for file in html_files:
            file_mapping[os.path.basename(file)] = file
            file_mapping[file] = file

        matched_files = []
        unmatched_files = []
        for target_file in target_files:
            target_file = target_file.strip()
            if target_file in file_mapping:
                matched_files.append(file_mapping[target_file])
                continue
            for file_path in html_files:
                if target_file.lower() in os.path.basename(file_path).lower():
                    matched_files.append(file_path)
                    break
            else:
                unmatched_files.append(target_file)

        if not matched_files:
            logger.warning(f"未找到任何匹配文件，将处理所有文件")
            return html_files

        # Deduplicate while preserving the order the caller asked for.
        unique_files = list(OrderedDict.fromkeys(matched_files))
        logger.info(f"找到匹配文件: {unique_files}")
        if unmatched_files:
            logger.warning(f"未找到文件: {unmatched_files}")
        return unique_files

    def extract_text_with_file_info(self, epub_path: str, target_files: List[str] = None) -> List[Tuple[str, str]]:
        """Extract plain text from each HTML document inside the EPUB.

        Args:
            epub_path: path to the EPUB archive.
            target_files: optional list of document names; when given, only
                matching documents are processed, in the given order.

        Returns:
            A list of ``(text, archive_file_name)`` tuples; empty on failure.
        """
        text_with_files: List[Tuple[str, str]] = []

        try:
            with zipfile.ZipFile(epub_path, 'r') as epub:
                html_files = [f for f in epub.namelist()
                              if f.endswith(('.html', '.xhtml', '.htm'))]

                if target_files:
                    html_files = self._select_html_files(html_files, target_files)
                else:
                    html_files.sort()

                total_files = len(html_files)
                for i, file in enumerate(html_files, 1):
                    try:
                        logger.info(f"正在处理文件 [{i}/{total_files}]: {file}")
                        start_time = time.time()

                        with epub.open(file) as f:
                            soup = BeautifulSoup(f.read(), 'html.parser')
                            # Drop non-prose elements before text extraction.
                            for tag in soup(["script", "style"]):
                                tag.decompose()
                            text_with_files.append((soup.get_text(), file))

                        elapsed = time.time() - start_time
                        logger.info(f"文件处理完成: {file} (耗时: {elapsed:.2f}秒)")
                    except Exception as e:
                        # A single broken document must not abort the book.
                        logger.warning(f"处理文件 {file} 时出错: {e}")
                        continue

        except Exception as e:
            logger.error(f"无法打开EPUB文件 {epub_path}: {e}")
            return []

        return text_with_files

    def extract_original_words_with_file_positions(self, text: str) -> List[Tuple[str, int]]:
        """Tokenize ``text`` into ``(word, 1-based position)`` tuples.

        Single-letter tokens are kept only for the legitimate English
        one-letter words "a" and "I".
        """
        words_with_positions: List[Tuple[str, int]] = []

        for match in re.finditer(r'\b[A-Za-z][A-Za-z\-]*[A-Za-z]?\b', text):
            word = match.group()
            # `.lower()` never yields 'A'/'I', so compare lowercase only.
            if len(word) > 1 or word.lower() in ('a', 'i'):
                words_with_positions.append((word, len(words_with_positions) + 1))

        return words_with_positions

    def calculate_word_frequency_in_new_book(self, text: str) -> Dict[str, float]:
        """Return per-word frequency as a percentage of all words in ``text``."""
        words_with_positions = self.extract_original_words_with_file_positions(text)
        total_words = len(words_with_positions)
        if total_words == 0:
            return {}

        counts: Dict[str, int] = {}
        for word, _ in words_with_positions:
            key = word.lower()
            counts[key] = counts.get(key, 0) + 1

        return {word: (count / total_words) * 100 for word, count in counts.items()}

    def find_intelligent_example(self, text: str, target_word: str, target_position: int,
                                 context_words: int = None) -> str:
        """Find an example snippet for ``target_word`` from a full sentence.

        Falls back to a plain word-window over the raw text when no sentence
        contains the word.  ``target_position`` is currently unused but kept
        for interface stability.
        """
        if context_words is None:
            context_words = self.context_words

        pattern = re.compile(r'\b' + re.escape(target_word) + r'\b', re.IGNORECASE)

        for sentence in self.split_into_sentences(text):
            if not pattern.search(sentence):
                continue
            words_in_sentence = sentence.split()
            for i, word in enumerate(words_in_sentence):
                if pattern.fullmatch(word):
                    start = max(0, i - context_words)
                    end = min(len(words_in_sentence), i + context_words + 1)
                    prefix = "..." if start > 0 else ""
                    suffix = "..." if end < len(words_in_sentence) else ""
                    return f"{prefix}{' '.join(words_in_sentence[start:end])}{suffix}"

        return self.find_basic_example(text, target_word, context_words)

    def split_into_sentences(self, text: str) -> List[str]:
        """Split ``text`` into sentences, keeping terminal punctuation.

        Fragments of 10 characters or fewer, or without any letter, are
        discarded.
        """
        sentences = []

        for line in text.split('\n'):
            line = line.strip()
            if not line:
                continue

            # Split on sentence-ending punctuation followed by whitespace;
            # the capture group keeps terminators, so parts alternate
            # text / terminator.
            parts = re.split(r'([.!?]+["\']?\s+)', line)
            # Step over text parts; stepping to len(parts) (not len-1) keeps
            # the trailing sentence, which the old loop silently dropped.
            for i in range(0, len(parts), 2):
                sentence = parts[i]
                if i + 1 < len(parts):
                    # Re-attach the terminator that followed this sentence.
                    sentence += parts[i + 1]
                sentence = sentence.strip()
                if sentence:
                    sentences.append(sentence)

        return [s for s in sentences if len(s) > 10 and any(c.isalpha() for c in s)]

    def find_basic_example(self, text: str, target_word: str, context_words: int) -> str:
        """Fallback example finder: a raw word-window around the first match.

        Returns "" when ``target_word`` does not occur as a whole word.
        """
        pattern = re.compile(r'\b' + re.escape(target_word) + r'\b', re.IGNORECASE)

        words = text.split()
        for i, word in enumerate(words):
            if pattern.fullmatch(word):
                start = max(0, i - context_words)
                end = min(len(words), i + context_words + 1)
                prefix = "..." if start > 0 else ""
                suffix = "..." if end < len(words) else ""
                return f"{prefix}{' '.join(words[start:end])}{suffix}"

        return ""

    def find_new_words_with_examples(self, new_epub_path: str, target_files: List[str] = None) -> Dict[str, List[Dict]]:
        """Find new words in the book, grouped by source document.

        A word counts as new when it is unknown (or known but below the
        frequency threshold), at least 3 letters long, purely alphabetic,
        not yet reported in this file and — when cross-file deduplication
        is enabled — not reported in an earlier file.  Words without a
        sufficiently long example sentence are skipped.

        Returns:
            OrderedDict mapping archive file name to its word records,
            each list sorted by position within the file.
        """
        if not self.known_words:
            logger.warning("没有已知单词数据")
            return OrderedDict()

        logger.info(f"开始分析新书: {new_epub_path}")
        if target_files:
            logger.info(f"只解析指定文件: {target_files}")

        text_with_files = self.extract_text_with_file_info(new_epub_path, target_files)
        if not text_with_files:
            logger.error(f"无法从新书 {new_epub_path} 提取文本")
            return OrderedDict()

        # Word frequencies across the whole book, used to annotate records.
        all_text = "\n".join(text for text, _ in text_with_files)
        new_book_word_frequencies = self.calculate_word_frequency_in_new_book(all_text)

        file_new_words: Dict[str, List[Dict]] = OrderedDict(
            (filename, []) for _, filename in text_with_files)
        # Words already reported in earlier files (None = dedup disabled).
        all_found_new_words = set() if self.enable_cross_file_deduplication else None

        total_files = len(text_with_files)
        for file_idx, (text, filename) in enumerate(text_with_files, 1):
            # Fixed: log the actual file name (was a literal placeholder).
            logger.info(f"分析文件 [{file_idx}/{total_files}]: {filename}")
            start_time = time.time()

            file_words_with_positions = self.extract_original_words_with_file_positions(text)
            total_words_in_file = len(file_words_with_positions)
            if total_words_in_file == 0:
                continue

            found_words_in_file: Set[str] = set()
            new_words_in_file = 0

            for word, word_position in file_words_with_positions:
                word_lower = word.lower()

                is_known_word = word_lower in self.known_words
                known_frequency = self.word_frequencies.get(word_lower, 0)
                # Known words rarer than the threshold still count as new.
                is_low_frequency = known_frequency < self.frequency_threshold
                already_seen = (word_lower in found_words_in_file or
                                (all_found_new_words is not None and
                                 word_lower in all_found_new_words))

                is_new_word = ((not is_known_word or is_low_frequency) and
                               not already_seen and
                               len(word) > 2 and
                               word_lower.isalpha())
                if not is_new_word:
                    continue

                example_sentence = self.find_intelligent_example(text, word, word_position)
                # Require a reasonably long example before reporting the word.
                if not example_sentence or len(example_sentence.strip()) <= 20:
                    continue

                file_new_words[filename].append({
                    'word': word,
                    'word_lower': word_lower,
                    'example_sentence': example_sentence.strip(),
                    'source_file': os.path.basename(filename),
                    'word_position': word_position,
                    'total_words_in_file': total_words_in_file,
                    'position_percentage': round((word_position / total_words_in_file) * 100, 2),
                    'known_frequency': round(known_frequency, 8),  # 8-decimal precision
                    'new_book_frequency': round(new_book_word_frequencies.get(word_lower, 0), 8),
                    'is_known_word': is_known_word,
                    'is_low_frequency': is_low_frequency
                })
                found_words_in_file.add(word_lower)
                new_words_in_file += 1
                if all_found_new_words is not None:
                    all_found_new_words.add(word_lower)

            elapsed = time.time() - start_time
            # Fixed: log the actual file name (was a literal placeholder).
            logger.info(f"文件分析完成: {filename} (发现 {new_words_in_file} 个生词, 耗时: {elapsed:.2f}秒)")

        result = OrderedDict()
        for filename, words_list in file_new_words.items():
            if words_list:
                words_list.sort(key=lambda x: x['word_position'])
                result[filename] = words_list

        total_new_words = sum(len(words) for words in result.values())
        logger.info(f"在新书中共发现 {total_new_words} 个生词")
        return result

    def process_new_book(self, new_epub_path: str, output_dir: str, target_files: List[str] = None):
        """Analyze one EPUB and write its new-word workbook to ``output_dir``.

        Returns:
            ``(success, new_epub_path)`` — exceptions are caught and reported
            as ``success=False`` so thread-pool callers can tally results.
        """
        try:
            logger.info(f"开始处理新书: {new_epub_path}")
            start_time = time.time()

            base_name = os.path.splitext(os.path.basename(new_epub_path))[0]
            if target_files:
                # Encode (up to) the first two target names in the filename.
                file_names = [os.path.splitext(os.path.basename(f))[0] for f in target_files[:2]]
                files_suffix = "_".join(file_names)
                if len(target_files) > 2:
                    files_suffix += f"_and_{len(target_files) - 2}_more"
                output_filename = f"{base_name}_{files_suffix}_new_words.xlsx"
            else:
                output_filename = f"{base_name}_new_words.xlsx"

            output_path = os.path.join(output_dir, output_filename)

            file_words = self.find_new_words_with_examples(new_epub_path, target_files)

            if file_words:
                self.save_new_words_to_excel_by_file(file_words, output_path, target_files)
                logger.info(f"新书处理完成: {new_epub_path} -> {output_path}")
            else:
                logger.info(f"在新书中未发现生词: {new_epub_path}")

            total_time = time.time() - start_time
            logger.info(f"新书处理完成: {new_epub_path} (耗时: {total_time:.2f}秒)")
            return True, new_epub_path
        except Exception as e:
            logger.error(f"处理新书时出错 {new_epub_path}: {e}")
            return False, new_epub_path

    def _words_to_dataframe(self, words_list: List[Dict]) -> 'pd.DataFrame':
        """Build an export DataFrame with the standard columns and headers."""
        df = pd.DataFrame(words_list)
        df = df[self._EXPORT_COLUMNS]
        return df.rename(columns=self._EXPORT_HEADERS)

    def save_new_words_to_excel_by_file(self, file_words: Dict[str, List[Dict]], output_path: str,
                                        target_files: List[str] = None):
        """Write per-file word lists to an Excel workbook.

        The workbook contains a summary sheet, one sheet per source file,
        and a combined "所有生词" sheet.  Returns True on success.
        """
        if not file_words:
            logger.warning("没有生词数据可保存")
            return False

        try:
            with pd.ExcelWriter(output_path, engine='openpyxl') as writer:
                # --- Summary sheet.  The grand total is computed up front so
                # each row's 占比(%) is relative to the whole book (the old
                # running total made the first file always show 100%). ---
                total_new_words = sum(len(words) for words in file_words.values())
                summary_data = []
                cumulative_words = 0
                for filename, words_list in file_words.items():
                    file_word_count = words_list[0]['total_words_in_file'] if words_list else 0
                    new_words_count = len(words_list)
                    cumulative_words += new_words_count
                    summary_data.append({
                        '文件名': os.path.basename(filename),
                        '生词数量': new_words_count,
                        '文件总单词数': file_word_count,
                        '生词密度(%)': round((new_words_count / file_word_count) * 100,
                                             2) if file_word_count > 0 else 0,
                        '累计生词数': cumulative_words,
                        '占比(%)': round((new_words_count / total_new_words) * 100, 2) if total_new_words > 0 else 0
                    })
                pd.DataFrame(summary_data).to_excel(writer, sheet_name='摘要', index=False)

                # --- One sheet per source file. ---
                for filename, words_list in file_words.items():
                    if words_list:
                        sheet_name = self.clean_sheet_name(os.path.basename(filename))
                        self._words_to_dataframe(words_list).to_excel(
                            writer, sheet_name=sheet_name, index=False)

                # --- Combined sheet with every word from every file. ---
                all_words = [w for words_list in file_words.values() for w in words_list]
                if all_words:
                    all_df = self._words_to_dataframe(all_words)
                    all_df = all_df.sort_values(by=['源文件', '单词位置'])
                    all_df.to_excel(writer, sheet_name='所有生词', index=False)

            logger.info(f"生词表已保存到: {output_path}")
            return True

        except Exception as e:
            logger.error(f"保存生词表时出错: {e}")
            return False

    def clean_sheet_name(self, name: str, max_length: int = 31) -> str:
        """Strip characters Excel forbids in sheet names and cap the length."""
        clean_name = re.sub(r'[\\/*?:[\]]', '', name)
        if len(clean_name) > max_length:
            # Excel caps sheet names at 31 chars; keep room for the ellipsis.
            clean_name = clean_name[:max_length - 3] + '...'
        return clean_name


def parse_target_files(target_files_str: str) -> List[str]:
    """Split a comma-separated file list into trimmed, non-empty names.

    Returns ``None`` for empty/missing input so callers can treat "no
    selection" the same as "argument not given".
    """
    if not target_files_str:
        return None
    names = []
    for raw_name in target_files_str.split(','):
        name = raw_name.strip()
        if name:
            names.append(name)
    return names


def main():
    """CLI entry point: analyze every EPUB in a directory for new words.

    Each book is processed by its own analyzer instance inside a thread
    pool (no state is shared between workers); results are written as
    Excel workbooks to the output directory.
    """
    parser = argparse.ArgumentParser(description='分析新EPUB书中的生词')
    parser.add_argument('new_books_dir', help='新EPUB文件目录路径')
    parser.add_argument('--known-words-file', default='known_words.json',
                        help='已知单词列表文件路径 (默认: known_words.json)')
    parser.add_argument('-f', '--target-files',
                        help='指定只解析新书中的特定文件列表，解析顺序按列表顺序，用英文逗号分隔（前提是这本书里面有这些文件，如果没有，依旧按整本书来解析')
    parser.add_argument('-o', '--output-dir', default='output',
                        help='输出目录路径 (默认: output)')
    parser.add_argument('-c', '--context-words', type=int, default=12,
                        help='例句上下文单词数量 (默认: 12)')
    parser.add_argument('-r', '--frequency-threshold', type=float, default=0.00001,
                        help='频率阈值，低于此值的已知单词也视为生词 (默认: 0.00001)')
    parser.add_argument('-t', '--threads', type=int, default=multiprocessing.cpu_count(),
                        help='处理新书时使用的线程数 (默认: CPU核心数)')
    parser.add_argument('--no-cross-file-deduplication', action='store_true',
                        help='禁用跨文件去重，默认启用跨文件去重')

    args = parser.parse_args()

    # Parse the optional comma-separated target-file list.
    target_files = parse_target_files(args.target_files)

    # Fail early with a clear message instead of an os.listdir traceback.
    if not os.path.isdir(args.new_books_dir):
        logger.error(f"新书目录不存在: {args.new_books_dir}")
        return

    # Create the output directory if needed.
    os.makedirs(args.output_dir, exist_ok=True)

    # Collect every EPUB file in the input directory.
    new_epub_files = [f for f in os.listdir(args.new_books_dir)
                      if f.lower().endswith('.epub')]
    if not new_epub_files:
        logger.error("在新书目录中未找到EPUB文件")
        return

    logger.info(f"找到 {len(new_epub_files)} 本新书，使用 {args.threads} 个线程处理")

    # Fan the books out over a thread pool (work is I/O heavy: zip + Excel).
    with ThreadPoolExecutor(max_workers=args.threads) as executor:
        futures = []
        for epub_file in new_epub_files:
            epub_path = os.path.join(args.new_books_dir, epub_file)

            # One analyzer per book so worker threads share no state.
            book_analyzer_instance = EPUBNewBookAnalyzer(
                known_words_file=args.known_words_file,
                context_words=args.context_words,
                frequency_threshold=args.frequency_threshold,
                enable_cross_file_deduplication=not args.no_cross_file_deduplication
            )
            futures.append(executor.submit(
                book_analyzer_instance.process_new_book,
                epub_path,
                args.output_dir,
                target_files
            ))

        # process_new_book never raises (it returns a (success, path) tuple),
        # so result() is safe to call without a try/except here.
        success_count = 0
        fail_count = 0
        for future in as_completed(futures):
            success, _ = future.result()
            if success:
                success_count += 1
            else:
                fail_count += 1

        logger.info(f"新书处理完成: 成功 {success_count} 本, 失败 {fail_count} 本")

    logger.info(f"生词结果已保存到目录: {args.output_dir}")


# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()