import io
import os
import re
from typing import Dict, List, Optional, Tuple

import chardet
class FileUtils:
    """Utility helpers for reading text files with automatic encoding detection.

    Aimed at data files (plain text, delimited tables, LAS well-log files)
    that may be encoded in UTF-8 or in common Chinese encodings (GBK,
    GB18030, Big5).
    """

    @staticmethod
    def detect_encoding(file_path: str, sample_size: int = 1024) -> str:
        """
        Detect a file's encoding from a leading byte sample.

        Falls back to probing common Chinese encodings when chardet is not
        confident, and to UTF-8 as the final default.

        Args:
            file_path: Path of the file to inspect.
            sample_size: Number of bytes to sample from the start of the file.

        Returns:
            str: Name of the detected encoding ('utf-8' as the safe default).
        """
        try:
            with open(file_path, 'rb') as f:
                raw = f.read(sample_size)

            # A UTF-8 BOM is unambiguous: honour it before asking chardet.
            # (Restores the intent of the previously commented-out check,
            # which referenced a non-existent chardet attribute.)
            if raw.startswith(b'\xef\xbb\xbf'):
                return 'utf-8-sig'

            # Statistical detection via chardet.
            result = chardet.detect(raw)

            # Trust chardet outright only when it is highly confident.
            # Guard against encoding=None, which chardet reports for
            # empty or undecidable samples.
            if result['confidence'] > 0.9 and result['encoding']:
                return result['encoding']

            # Bytes common in GBK/Big5 text suggest a Chinese encoding:
            # verify candidates by actually decoding a sample of the file.
            if any(b in raw for b in [b'\xa7', b'\xb2', b'\xe6', b'\xc4']):
                for encoding in ('gbk', 'gb18030', 'big5', 'utf-8'):
                    try:
                        with open(file_path, 'r', encoding=encoding) as f:
                            f.read(sample_size)
                        return encoding
                    except UnicodeDecodeError:
                        continue

            # Safest default when nothing above matched.
            return 'utf-8'

        except Exception as e:
            # Best-effort helper: report the failure and fall back to UTF-8
            # rather than propagating (callers handle decode errors anyway).
            print(f"编码检测失败: {e}")
            return 'utf-8'

    @staticmethod
    def read_txt_file(file_path: str, encoding: Optional[str] = None) -> str:
        """
        Read a whole text file, falling back through common encodings on failure.

        Args:
            file_path: Path of the file to read.
            encoding: Encoding to force; None means auto-detect.

        Returns:
            str: The decoded file content.

        Raises:
            FileNotFoundError: If ``file_path`` does not exist.
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"文件不存在: {file_path}")

        # Auto-detect the encoding unless the caller pinned one.
        actual_encoding = encoding or FileUtils.detect_encoding(file_path)

        try:
            with open(file_path, 'r', encoding=actual_encoding) as f:
                return f.read()
        except UnicodeDecodeError:
            # Detected/forced encoding failed: try the usual fallbacks.
            print(f"{actual_encoding}解码失败，尝试常用编码回退...")
            for enc in ('gbk', 'gb18030', 'latin1', 'cp1252'):
                try:
                    # Read first, announce success after — the original
                    # printed before reading and could report a false success.
                    with open(file_path, 'r', encoding=enc) as f:
                        content = f.read()
                    print(f"成功使用回退编码: {enc}")
                    return content
                except UnicodeDecodeError:
                    continue

            # Last resort: decode bytes as UTF-8, replacing invalid sequences.
            print("所有编码尝试均失败，使用二进制模式读取")
            with open(file_path, 'rb') as f:
                return f.read().decode('utf-8', errors='replace')

    @staticmethod
    def read_txt_lines(file_path: str, encoding: Optional[str] = None,
                       max_lines: Optional[int] = None) -> List[str]:
        """
        Read a text file as a list of lines (encoding auto-detected if needed).

        Args:
            file_path: Path of the file to read.
            encoding: Encoding to force; None means auto-detect.
            max_lines: If given, return at most this many leading lines.

        Returns:
            List[str]: The file's lines without line terminators.
        """
        lines = FileUtils.read_txt_file(file_path, encoding).splitlines()
        return lines if max_lines is None else lines[:max_lines]

    @staticmethod
    def read_data_lines(file_path: str, start_line: int) -> Dict[str, List[str]]:
        """
        Read whitespace-separated data rows starting at ``start_line``.

        Args:
            file_path: Path of the data file.
            start_line: Index of the first data row; a falsy value (0/None)
                defaults to 1, i.e. everything after the header row.

        Returns:
            Dict[str, List[str]]: Mapping of each header field to its column
            of values (kept as strings).

        Raises:
            ValueError: If no header row can be identified.
        """
        with open(file_path, 'rb') as f:
            raw_data = f.read()

        # Detect encoding from a leading sample; default to GBK when unsure.
        result = chardet.detect(raw_data[:1024])
        encoding = result['encoding'] if result['confidence'] > 0.9 else 'gbk'
        lines = raw_data.decode(encoding).splitlines()

        # Header fields come from read_header; its detected start line is
        # deliberately ignored in favour of the caller-supplied start_line.
        header, _ = FileUtils.read_header(file_path)
        if not header:
            raise ValueError("无法识别表头")

        # A falsy start_line skips just the header row.
        start_index = start_line or 1

        # One value list per header field, in header order.
        field_dict: Dict[str, List[str]] = {field: [] for field in header}

        for line in lines[start_index:]:
            stripped = line.strip()
            if not stripped:
                continue

            # Fields are separated by arbitrary runs of whitespace.
            values = [v.strip() for v in re.split(r'\s+', stripped)]
            # Skip malformed rows whose field count differs from the header.
            if len(values) != len(header):
                continue

            for i, value in enumerate(values):
                field_dict[header[i]].append(value)

        return field_dict

    @staticmethod
    def read_header(file_path: str, max_lines: int = 100) -> Tuple[List[str], int]:
        """
        Extract column names and the first data-line index from a data file.

        Supports LAS well-log files (curve mnemonics parsed from the
        '~CURVE INFORMATION' section) and generic delimited text (a header
        row split on tab/comma/pipe/space/semicolon).

        Args:
            file_path: Path of the file to inspect.
            max_lines: Maximum number of leading lines to scan (LAS only).

        Returns:
            Tuple[List[str], int]: (header fields, index of the first data
            line); ([], <index>) when no header is found.
        """
        with open(file_path, 'rb') as f:
            raw_data = f.read()

        # Detect encoding from a leading sample; default to GBK when unsure.
        result = chardet.detect(raw_data[:1024])
        encoding = result['encoding'] if result['confidence'] > 0.9 else 'gbk'
        lines = raw_data.decode(encoding).splitlines()

        # LAS files declare their curves in a '~CURVE INFORMATION' section.
        is_las = any(line.strip().startswith('~CURVE INFORMATION') for line in lines)
        header = []
        data_start_line = 0

        if is_las:
            in_curve_section = False
            for i, line in enumerate(lines[:max_lines]):
                stripped = line.strip()
                if not stripped:
                    continue

                if stripped.startswith('~CURVE INFORMATION'):
                    in_curve_section = True
                    continue

                # Skip decorative separator and column-caption rows.
                if in_curve_section and stripped.startswith('#----'):
                    continue

                if in_curve_section and stripped.startswith('#MNEM.UNIT'):
                    continue

                if in_curve_section:
                    # Curve lines look like 'MNEM ... : description';
                    # the leading mnemonic is the field name.
                    match = re.match(r'^([A-Z0-9]+)(?:\s+.*)*:\s*.*$', stripped)
                    if match:
                        field_match = re.match(r'^([A-Z0-9]+)', stripped)
                        if field_match:
                            header.append(field_match.group(1))
                        continue
                    else:
                        # First non-curve line ends the section.
                        in_curve_section = False

                # A line starting with a number marks the first data row.
                if re.match(r'^\s*\d+(\.\d+)?', stripped):
                    data_start_line = i
                    print("数据行开始行号：", data_start_line)
                    break

            if header:
                return header, data_start_line
            else:
                return [], data_start_line

        else:
            # Generic delimited text: try common separators on the first few
            # lines until one yields >= 2 fields that are not all numeric.
            separators = ['\t', ',', '|', ' ', ';']
            for sep in separators:
                for i, line in enumerate(lines[:5]):
                    stripped = line.strip()
                    if not stripped:
                        continue

                    if re.match(r'^\s*\d+(\.\d+)?', stripped):
                        data_start_line = i
                        break

                    fields = [f.strip() for f in stripped.split(sep)]
                    fields = [f for f in fields if f]
                    if len(fields) >= 2 and not all(re.match(r'^\d+(\.\d+)?$', f) for f in fields):
                        return fields, data_start_line

            return [], data_start_line
