import csv
import io
import itertools
import logging
import re
import time
from pathlib import Path

import chardet
import hyperscan
import regex

logger = logging.getLogger(__name__)


class GlossaryEntry:
    def __init__(self, source: str, target: str, target_language: str | None = None):
        # 确保所有字符串都是正确的UTF-8编码
        self.source = self._ensure_utf8_string(source)
        self.target = self._ensure_utf8_string(target)
        self.target_language = target_language

    @staticmethod
    def _ensure_utf8_string(text):
        """确保文本是正确的UTF-8字符串"""
        if isinstance(text, bytes):
            try:
                return text.decode('utf-8')
            except UnicodeDecodeError:
                # 尝试其他常见编码
                for encoding in ['gbk', 'gb2312', 'latin-1', 'cp1252']:
                    try:
                        return text.decode(encoding)
                    except UnicodeDecodeError:
                        continue
                # 如果都失败，使用错误处理
                return text.decode('utf-8', errors='replace')

        if isinstance(text, str):
            # 确保字符串可以正确编码为UTF-8
            try:
                text.encode('utf-8')
                return text
            except UnicodeEncodeError:
                return text.encode('utf-8', errors='replace').decode('utf-8')

        return str(text)

    def __repr__(self):
        return f"GlossaryEntry(source='{self.source}', target='{self.target}', target_language='{self.target_language}')"


def batched(iterable, n, *, strict=False):
    """Yield successive tuples of up to *n* items taken from *iterable*.

    Backport of itertools.batched (Python 3.12+, ``strict`` from 3.13):
    batched('ABCDEFG', 3) -> ABC DEF G. With ``strict=True`` a trailing
    short batch raises ValueError.
    """
    if n < 1:
        raise ValueError("n must be at least one")
    it = iter(iterable)
    chunk = tuple(itertools.islice(it, n))
    while chunk:
        if strict and len(chunk) != n:
            raise ValueError("batched(): incomplete batch")
        yield chunk
        chunk = tuple(itertools.islice(it, n))


TERM_NORM_PATTERN = re.compile(r"\s+", regex.UNICODE)


class Glossary:
    """A named collection of glossary entries with fast multi-pattern term
    lookup backed by hyperscan.

    Entries are deduplicated on their normalized source term (first
    occurrence wins); original (source, target) pairs are kept so matches
    can be reported verbatim.
    """

    def __init__(self, name: str, entries: list[GlossaryEntry]):
        self.name = name

        # Deduplicate entries based on normalized source; first occurrence wins.
        unique_entries: list[GlossaryEntry] = []
        seen_normalized_sources: set[str] = set()
        for entry in entries:
            normalized_source = self.normalize_source(entry.source)
            if normalized_source not in seen_normalized_sources:
                unique_entries.append(entry)
                seen_normalized_sources.add(normalized_source)
        self.entries = unique_entries

        # normalized source term -> (original source, original target)
        self.normalized_lookup: dict[str, tuple[str, str]] = {}
        # hyperscan pattern id (entry index) -> (original source, original target)
        self.id_lookup: list[tuple[str, str]] = []
        self.hs_dbs: list[hyperscan.Database] | None = None
        # Fix: previously this attribute was only assigned on the
        # empty-entries path inside _build_regex_and_lookup; initialize it
        # unconditionally so it always exists.
        self.source_terms_regex = None
        self._build_regex_and_lookup()

    @staticmethod
    def normalize_source(source_term: str) -> str:
        """Normalizes a source term by lowercasing and standardizing whitespace."""
        term = source_term.lower()
        term = TERM_NORM_PATTERN.sub(
            " ", term
        )  # Replace multiple whitespace with single space
        return term.strip()

    def _build_regex_and_lookup(self):
        """
        Builds hyperscan databases for all source terms and a lookup dictionary
        from normalized source terms to (original_source, original_target).
        Patterns are compiled in fixed-size chunks to keep each database small.

        (Fix: this docstring previously appeared after the first statement,
        making it a dead string expression rather than a docstring.)
        """
        logger.debug(
            f"start build regex for glossary {self.name} with {len(self.entries)} entries"
        )
        # Reset lookups so a rebuild does not accumulate duplicate entries.
        self.normalized_lookup = {}
        self.id_lookup = []

        if not self.entries:
            self.source_terms_regex = None
            return

        self.hs_dbs = []
        hs_pattern = []
        start = time.time()
        for idx, entry in enumerate(self.entries):
            normalized_key = self.normalize_source(entry.source)
            self.normalized_lookup[normalized_key] = (entry.source, entry.target)
            self.id_lookup.append((entry.source, entry.target))

            # Make sure the pattern encodes cleanly before handing it to hyperscan.
            try:
                pattern_bytes = re.escape(entry.source).encode("utf-8")
                hs_pattern.append((pattern_bytes, idx))
            except UnicodeEncodeError as e:
                logger.error(f"Failed to encode source term '{entry.source}': {e}")
                continue

        chunk_size = 20000
        # Fix: ceil-divide over the patterns actually collected so the logged
        # total is right when the count is an exact multiple of chunk_size.
        total_chunks = (len(hs_pattern) + chunk_size - 1) // chunk_size
        for i, pattern_chunk in enumerate(
                batched(hs_pattern, chunk_size, strict=False)
        ):
            logger.debug(f"building hs_db chunk {i + 1} / {total_chunks}")
            expressions, ids = zip(*pattern_chunk, strict=False)

            try:
                hs_db = hyperscan.Database()
                hs_db.compile(
                    expressions=expressions,
                    ids=ids,
                    elements=len(pattern_chunk),
                    # NOTE: matching is byte-level; CASELESS without the UTF8/UCP
                    # flags only case-folds ASCII.
                    flags=hyperscan.HS_FLAG_CASELESS | hyperscan.HS_FLAG_SINGLEMATCH,
                    # | hyperscan.HS_FLAG_UTF8
                    # | hyperscan.HS_FLAG_UCP,
                )
                self.hs_dbs.append(hs_db)
            except Exception as e:
                # Best-effort: a failed chunk is skipped, not fatal.
                logger.error(f"Failed to compile hyperscan database: {e}")
                continue

        end = time.time()
        logger.debug(
            f"finished building regex for glossary {self.name} in {end - start:.2f} seconds"
        )
        if self.hs_dbs:
            logger.debug(
                f"build hs database for glossary {self.name} with {len(self.entries)} entries, hs_info: {self.hs_dbs[0].info()}"
            )
        else:
            # No database could be compiled; treat the same as "no patterns".
            self.hs_dbs = None

    @classmethod
    def from_csv(cls, file_path: Path, target_lang_out: str) -> "Glossary":
        """
        Loads glossary entries from a CSV file.
        CSV format: source,target,tgt_lng (tgt_lng is optional)
        Filters entries based on tgt_lng matching target_lang_out.
        The glossary name is derived from the CSV filename.

        Raises:
            FileNotFoundError: if the file does not exist.
            ValueError: if the file cannot be read/parsed or lacks the
                required 'source'/'target' columns.
        """
        glossary_name = file_path.stem
        loaded_entries: list[GlossaryEntry] = []

        # Normalize target_lang_out once for comparison
        normalized_target_lang_out = target_lang_out.lower().replace("-", "_")

        try:
            # Read raw bytes so we can choose the encoding ourselves.
            with file_path.open("rb") as f:
                content = f.read()

            # Detect the encoding.
            detected = chardet.detect(content)
            encoding = detected["encoding"]
            confidence = detected["confidence"]

            logger.info(f"Detected encoding: {encoding} (confidence: {confidence})")
            print(f"检测到文件编码: {encoding} (置信度: {confidence:.2f})")

            # Low confidence: try common encodings (chardet may even report
            # encoding=None for empty/binary input).
            if confidence < 0.7:
                for fallback_encoding in ['utf-8', 'gbk', 'gb2312', 'utf-8-sig']:
                    try:
                        content.decode(fallback_encoding)
                        encoding = fallback_encoding
                        logger.info(f"Using fallback encoding: {encoding}")
                        print(f"使用备用编码: {encoding}")
                        break
                    except UnicodeDecodeError:
                        continue

            # Decode the content.
            # Fix: chardet can return None, or a codec name Python does not
            # know (LookupError); both now fall back to lenient UTF-8 instead
            # of crashing with TypeError/LookupError.
            try:
                text_content = content.decode(encoding or "utf-8")
            except (UnicodeDecodeError, LookupError):
                # Last resort.
                text_content = content.decode('utf-8', errors='replace')
                logger.warning("Used UTF-8 with error replacement")
                print("警告: 使用UTF-8编码并替换错误字符")

            # Parse the CSV.
            buffer = io.StringIO(text_content)
            reader = csv.DictReader(buffer, doublequote=True)

            # Fix: fieldnames is None for an empty file; guard before using `in`
            # so we raise the intended ValueError rather than a TypeError.
            if not reader.fieldnames or not all(
                col in reader.fieldnames for col in ["source", "target"]
            ):
                raise ValueError(
                    f"CSV file {file_path} must contain 'source' and 'target' columns."
                )

            for row_num, row in enumerate(reader, start=2):  # Row 1 is the header
                try:
                    source = row["source"].strip() if row["source"] else ""
                    target = row["target"].strip() if row["target"] else ""
                    tgt_lng = row.get("tgt_lng", "").strip() if row.get("tgt_lng") else None

                    if not source or not target:
                        logger.warning(f"Empty source or target in row {row_num}, skipping")
                        continue

                    # Verify the values survive UTF-8 encoding.
                    try:
                        source.encode('utf-8')
                        target.encode('utf-8')
                    except UnicodeEncodeError as e:
                        logger.error(f"Encoding error in row {row_num}: {e}")
                        continue

                    if tgt_lng:
                        normalized_entry_tgt_lng = tgt_lng.lower().replace("-", "_")
                        if normalized_entry_tgt_lng != normalized_target_lang_out:
                            continue  # Skip if language doesn't match

                    entry = GlossaryEntry(source, target, tgt_lng)
                    loaded_entries.append(entry)

                    # Debug output for the first few entries.
                    if len(loaded_entries) <= 3:
                        print(
                            f"  加载术语 {len(loaded_entries)}: '{entry.source}' -> '{entry.target}' (编码验证: {repr(entry.target)})")

                except Exception as e:
                    # Best-effort per-row handling: log and keep going.
                    logger.error(f"Error processing row {row_num}: {e}")
                    continue

        except FileNotFoundError:
            raise
        except Exception as e:
            raise ValueError(
                f"Error reading or parsing CSV file {file_path}: {e}"
            ) from e

        logger.info(f"Successfully loaded {len(loaded_entries)} entries from {file_path}")
        return cls(name=glossary_name, entries=loaded_entries)

    def to_csv(self) -> str:
        """Exports the glossary entries to a CSV formatted string."""
        dict_data = [
            {
                "source": x.source,
                "target": x.target,
                "tgt_lng": x.target_language if x.target_language else "",
            }
            for x in self.entries
        ]
        buffer = io.StringIO()
        dict_writer = csv.DictWriter(
            buffer, fieldnames=["source", "target", "tgt_lng"], doublequote=True
        )
        dict_writer.writeheader()
        dict_writer.writerows(dict_data)
        return buffer.getvalue()

    def __repr__(self):
        return f"Glossary(name='{self.name}', num_entries={len(self.entries)})"

    def get_active_entries_for_text(self, text: str) -> list[tuple[str, str]]:
        """Returns a list of (original_source, target_text) tuples for terms found in the given text.

        Returns an empty list when no databases were built, the text is
        empty, or scanning fails.
        """
        if not self.hs_dbs or not text:
            return []

        text = TERM_NORM_PATTERN.sub(" ", text)  # Normalize whitespace in the text
        if not text:
            return []

        active_entries = []

        def on_match(
                idx: int, _from: int, _to: int, _flags: int, _context=None
        ) -> bool | None:
            # idx is the global entry index assigned at build time, so it is
            # valid across all chunked databases.
            if idx < len(self.id_lookup):
                active_entries.append(self.id_lookup[idx])
            return False

        try:
            text_bytes = text.encode("utf-8")
            for hs_db in self.hs_dbs:
                # Scan the text with the hyperscan database
                scratch = hyperscan.Scratch(hs_db)
                hs_db.scan(text_bytes, on_match, scratch=scratch)
        except UnicodeEncodeError as e:
            logger.error(f"Failed to encode text for scanning: {e}")
            return []
        except Exception as e:
            logger.error(f"Error during hyperscan: {e}")
            return []

        return active_entries