# utils/data_manager.py
import os
import re
import json
import chardet
import numpy as np
import pandas as pd
from io import StringIO
from typing import Dict, List, Optional, Union
from PySide6.QtCore import QObject, Signal
from PySide6.QtWidgets import QMessageBox


class DataManager(QObject):
    """Central store for all well data in the current project.

    Responsibilities:
      * import well-log files (.txt / .csv / .xlsx / .xls) with tolerant
        encoding and format detection,
      * keep each well's data in memory as a pandas DataFrame keyed by
        well name,
      * persist wells as one CSV per well plus a ``project_info.json``
        metadata file inside the project directory.

    User-facing error reporting goes through ``QMessageBox``; diagnostic
    detail goes to ``print`` (NOTE(review): consider ``logging`` — left
    as-is to preserve behavior).
    """

    # Emitted after a well is added / updated / deleted / renamed.
    # Payload is the affected well name (the NEW name after a rename).
    data_updated = Signal(str)

    # Common depth-column names (English and Chinese) used for auto-detection.
    DEPTH_COLUMN_CANDIDATES = ["DEPTH", "MD", "TVD", "斜深", "井深", "深度"]

    def __init__(self):
        super().__init__()
        self.well_data: Dict[str, pd.DataFrame] = {}  # well name -> DataFrame
        self.depth_column = "DEPTH"  # active depth column name for this project
        self.project_path = os.path.join("Well_Data_visualization_program")  # project directory
        self.encoding_priority = ["utf-8", "gbk", "gb2312", "latin-1"]  # encodings tried first, in order

    def set_project_path(self, path: str) -> bool:
        """Set the project directory (creating it if needed) and load its data.

        Returns True on success, False if the directory could not be created.
        """
        if not os.path.isdir(path):
            try:
                # exist_ok guards against a race where the directory appears
                # between the isdir() check and makedirs().
                os.makedirs(path, exist_ok=True)
            except Exception as e:
                QMessageBox.critical(None, "错误", f"无法创建项目目录: {str(e)}")
                return False

        self.project_path = path
        self._load_project_data()
        return True

    def get_project_path(self) -> str:
        """Return the current project directory path."""
        return self.project_path

    def add_well(self, well_name: str, data: pd.DataFrame) -> bool:
        """Add a new well. Fails (with a warning dialog) if the name exists
        or no depth column can be found in ``data``.
        """
        if well_name in self.well_data:
            QMessageBox.warning(None, "警告", f"井名 '{well_name}' 已存在!")
            return False

        # Auto-detect the depth column when the configured one is missing.
        # NOTE(review): this mutates the project-wide depth_column based on a
        # single file — kept as-is to preserve existing behavior.
        if self.depth_column not in data.columns:
            detected_depth_col = self._detect_depth_column(data.columns)
            if detected_depth_col:
                self.depth_column = detected_depth_col
            else:
                QMessageBox.warning(None, "警告",
                                    f"数据不包含深度列（尝试了: {', '.join(self.DEPTH_COLUMN_CANDIDATES)}）")
                return False

        # Sort / dedupe / drop rows with missing depth.
        data = self._preprocess_data(data)

        self.well_data[well_name] = data
        self._save_well_data(well_name)
        self.data_updated.emit(well_name)
        return True

    def update_well(self, well_name: str, data: pd.DataFrame) -> bool:
        """Replace an existing well's data. The well must already exist and
        ``data`` must contain the current depth column.
        """
        if well_name not in self.well_data:
            QMessageBox.warning(None, "警告", f"井名 '{well_name}' 不存在!")
            return False

        if self.depth_column not in data.columns:
            QMessageBox.warning(None, "警告", f"数据不包含深度列 '{self.depth_column}'")
            return False

        data = self._preprocess_data(data)

        self.well_data[well_name] = data
        self._save_well_data(well_name)
        self.data_updated.emit(well_name)
        return True

    def delete_well(self, well_name: str) -> bool:
        """Delete a well's data file and in-memory entry.

        Deleting a non-existent well is treated as success (idempotent).
        """
        if well_name not in self.well_data:
            return True  # already gone — treat as success

        try:
            # Remove the on-disk CSV, if any.
            data_file = os.path.join(self.project_path, f"{well_name}.csv")
            if os.path.exists(data_file):
                os.remove(data_file)

            # Remove the in-memory entry.
            del self.well_data[well_name]

            # Keep project metadata in sync.
            self._save_project_info()

            self.data_updated.emit(well_name)
            return True
        except Exception as e:
            QMessageBox.critical(None, "错误", f"删除井数据失败: {str(e)}")
            return False

    def rename_well(self, old_name: str, new_name: str) -> bool:
        """Rename a well, moving its CSV file and in-memory entry."""
        if old_name not in self.well_data:
            QMessageBox.warning(None, "警告", f"井名 '{old_name}' 不存在!")
            return False

        if new_name in self.well_data:
            QMessageBox.warning(None, "警告", f"井名 '{new_name}' 已存在!")
            return False

        try:
            old_file = os.path.join(self.project_path, f"{old_name}.csv")
            new_file = os.path.join(self.project_path, f"{new_name}.csv")

            if os.path.exists(old_file):
                os.rename(old_file, new_file)

            # Move the in-memory entry to the new key.
            self.well_data[new_name] = self.well_data.pop(old_name)

            self._save_project_info()

            self.data_updated.emit(new_name)
            return True
        except Exception as e:
            QMessageBox.critical(None, "错误", f"重命名井失败: {str(e)}")
            return False

    def get_well_list(self) -> List[str]:
        """Return all well names (insertion order)."""
        return list(self.well_data.keys())

    def get_well_data(self, well_name: str) -> Optional[pd.DataFrame]:
        """Return the DataFrame for ``well_name``, or None if unknown."""
        return self.well_data.get(well_name)

    def get_depth_column(self) -> str:
        """Return the current depth column name."""
        return self.depth_column

    def set_depth_column(self, column_name: str) -> None:
        """Override the depth column name."""
        self.depth_column = column_name

    def import_well_data(self, file_path: str, well_name: Optional[str] = None) -> bool:
        """Import well data from a file; format is chosen by extension.

        If ``well_name`` is not given, it is derived from the file name.
        Returns True when the well was added successfully.
        """
        try:
            _, ext = os.path.splitext(file_path)
            ext_lower = ext.lower()  # hoisted: compared several times below

            if ext_lower == '.txt':
                data = self._read_text_file(file_path)
            elif ext_lower == '.csv':
                data = self._read_csv_file(file_path)
            elif ext_lower in ('.xlsx', '.xls'):
                data = self._read_excel_file(file_path)
            else:
                QMessageBox.warning(None, "警告", f"不支持的文件格式: {ext}")
                return False

            if data is None or data.empty:
                QMessageBox.warning(None, "警告", f"文件解析结果为空: {file_path}")
                return False

            # Derive a well name from the file name when none was supplied.
            if not well_name:
                well_name = self._extract_well_name(os.path.basename(file_path))

            return self.add_well(well_name, data)
        except Exception as e:
            QMessageBox.critical(None, "错误", f"导入数据失败: {str(e)}")
            return False

    def _detect_file_encoding(self, file_path: str) -> str:
        """Detect a file's encoding with chardet, falling back to 'gbk'.

        Extracted helper: this chardet sequence was duplicated in three
        places (_read_text_file, _read_csv_file, _load_well_data).
        """
        with open(file_path, 'rb') as f:
            raw_data = f.read()
        result = chardet.detect(raw_data)
        return result['encoding'] or 'gbk'

    def _read_text_file(self, file_path):
        """Read and parse a text-format well data file.

        Tries the priority encodings first, then a chardet-detected one.
        Returns an empty DataFrame when nothing could be parsed.
        """
        try:
            for encoding in self.encoding_priority:
                try:
                    with open(file_path, 'r', encoding=encoding) as f:
                        lines = f.readlines()
                    data = self._parse_text_data(lines, encoding)
                    if data is not None and not data.empty:
                        print(f"使用编码 {encoding} 成功解析文件")
                        return data
                except UnicodeDecodeError:
                    continue  # wrong encoding — try the next one

            # Priority list failed; fall back to chardet detection.
            detected_encoding = self._detect_file_encoding(file_path)
            try:
                with open(file_path, 'r', encoding=detected_encoding) as f:
                    lines = f.readlines()
                data = self._parse_text_data(lines, detected_encoding)
                if data is not None and not data.empty:
                    print(f"使用chardet检测的编码 {detected_encoding} 成功解析文件")
                    return data
            except Exception as e:
                print(f"使用chardet检测的编码 {detected_encoding} 解析失败: {e}")

            QMessageBox.warning(None, "警告", f"无法解析文件: {file_path}，尝试了所有可用编码")
            return pd.DataFrame()

        except Exception as e:
            print(f"读取文件 {file_path} 时出错: {e}")
            return pd.DataFrame()

    def _parse_text_data(self, lines, encoding):
        """Parse raw text lines into a DataFrame, trying several layouts:
        LAS-style sections, delimited (CSV/TSV/whitespace) with a header,
        and finally header-less whitespace-separated data.
        """
        # Layout 1: LAS-style file with an '~ASCII DATA SECTION' marker.
        data_start = next((i for i, line in enumerate(lines) if line.strip().startswith('~ASCII DATA SECTION')), None)
        if data_start is not None:
            return self._parse_las_format(lines, data_start, encoding)

        # Layout 2: first line is a header, rest is delimited data.
        if len(lines) > 1:
            # Guess the separator from the first data line.
            sample_line = lines[1].strip()
            if ',' in sample_line:
                sep = ','
            elif '\t' in sample_line:
                sep = '\t'
            else:
                sep = r'\s+'  # probably whitespace-separated

            try:
                header_line = lines[0].strip()
                headers = header_line.split(sep) if sep != r'\s+' else header_line.split()

                # Need at least two columns for a plausible header row.
                if len(headers) > 1:
                    data_str = '\n'.join(lines[1:])
                    df = pd.read_csv(StringIO(data_str), sep=sep, header=None, names=headers)
                    return df
            except Exception as e:
                print(f"解析CSV/TSV格式失败: {e}")

        # Layout 3: whitespace-separated, header position unknown.
        try:
            # Scan the first few lines for a header: a line with 3+ tokens
            # none of which look numeric (numeric tokens mean a data row).
            header_candidates = []
            for i, line in enumerate(lines[:10]):
                line = line.strip()
                if not line:
                    continue

                parts = line.split()
                if len(parts) > 2:  # need at least 3 columns for header + data
                    # FIX: strip a leading sign so '-123.4' counts as numeric;
                    # previously negative data rows were misread as headers.
                    if not any(part.lstrip('+-').replace('.', '', 1).isdigit() for part in parts):
                        header_candidates.append((i, line))

            if header_candidates:
                header_line_num, header_line = header_candidates[0]
                headers = header_line.split()

                data_lines = lines[header_line_num + 1:]
                data_str = '\n'.join(data_lines)
                df = pd.read_csv(StringIO(data_str), sep=r'\s+', header=None, names=headers)
                return df
            else:
                # No header found: parse first, then generate names sized to
                # the ACTUAL column count. (FIX: passing 99 names to read_csv
                # shifted the surplus names into the index when the file had
                # fewer columns.)
                data_str = '\n'.join(lines)
                df = pd.read_csv(StringIO(data_str), sep=r'\s+', header=None)
                df.columns = [f"Column{i}" for i in range(1, len(df.columns) + 1)]
                return df

        except Exception as e:
            print(f"解析简单空格分隔格式失败: {e}")

        return pd.DataFrame()

    def _parse_las_format(self, lines, data_start, encoding):
        """Parse a LAS-style data section.

        Assumes the column header sits two lines above the
        '~ASCII DATA SECTION' marker (possibly '#'-prefixed).
        """
        try:
            # FIX: guard against the marker being in the first two lines —
            # a negative index would silently read from the END of the list.
            if data_start >= 2:
                header_line = lines[data_start - 2].strip().lstrip('#').strip()
            else:
                header_line = ''
            headers = header_line.split() if header_line else []

            # Everything after the marker that is not blank is data.
            data_lines = [line for line in lines[data_start + 1:] if line.strip()]
            if not data_lines:
                print("没有找到数据行")
                return pd.DataFrame()

            data_str = '\n'.join(data_lines)
            df = pd.read_csv(StringIO(data_str), sep=r'\s+', header=None, names=headers)
            return df
        except Exception as e:
            print(f"解析LAS格式失败: {e}")
            return pd.DataFrame()

    def _read_csv_file(self, file_path):
        """Read a CSV file, trying priority encodings then chardet.

        Returns an empty DataFrame on failure.
        """
        try:
            for encoding in self.encoding_priority:
                try:
                    return pd.read_csv(file_path, encoding=encoding)
                except UnicodeDecodeError:
                    continue

            detected_encoding = self._detect_file_encoding(file_path)
            return pd.read_csv(file_path, encoding=detected_encoding)

        except Exception as e:
            print(f"读取CSV文件失败: {e}")
            return pd.DataFrame()

    def _read_excel_file(self, file_path):
        """Read the first sheet of an Excel file.

        Always returns a DataFrame (empty on failure). FIX: the original
        could fall off the end and return None, crashing callers that
        check ``.empty``.
        """
        try:
            return pd.read_excel(file_path)
        except Exception as e:
            print(f"读取Excel文件失败: {e}")
            # Fallback: enumerate sheets explicitly and parse the first one.
            try:
                excel_file = pd.ExcelFile(file_path)
                sheet_names = excel_file.sheet_names
                if sheet_names:
                    return excel_file.parse(sheet_names[0])
            except Exception as e2:
                print(f"备用方法读取Excel文件失败: {e2}")
            return pd.DataFrame()

    def _extract_well_name(self, file_name):
        """Derive a well name from a file name.

        Tries progressively looser patterns: '3-6A井', '3-6A' (suffixing
        '井'), '123井', any text ending in '井', and finally the bare file
        stem with common WELL/LOG/DATA affixes stripped.
        """
        # Pattern like 3-6A井
        pattern = r'(\d+-\d+[A-Za-z]*井)'
        match = re.search(pattern, file_name)
        if match:
            return match.group(1)

        # Pattern like 3-6A — append the '井' suffix
        pattern = r'(\d+-\d+[A-Za-z]*)'
        match = re.search(pattern, file_name)
        if match:
            return match.group(1) + '井'

        # Pattern like 123井
        pattern = r'(\d+井)'
        match = re.search(pattern, file_name)
        if match:
            return match.group(1)

        # Any alphanumeric run ending in '井'
        index = file_name.find('井')
        if index != -1:
            # Walk left over letters/digits to find the name's start.
            start = index
            while start > 0 and (file_name[start - 1].isdigit() or file_name[start - 1].isalpha()):
                start -= 1
            return file_name[start:index + 1]

        # Last resort: file stem with common prefixes/suffixes removed.
        base_name = os.path.splitext(file_name)[0]
        prefixes = ["WELL_", "WELL-", "LOG_", "LOG-", "DATA_", "DATA-"]
        suffixes = ["_DATA", "-DATA", "_LOG", "-LOG", "_WELL", "-WELL"]

        for prefix in prefixes:
            if base_name.startswith(prefix):
                base_name = base_name[len(prefix):]
                break

        for suffix in suffixes:
            if base_name.endswith(suffix):
                base_name = base_name[:-len(suffix)]
                break

        return base_name

    def _detect_depth_column(self, columns):
        """Return the first column matching a depth-name candidate, or None.

        Exact (case-insensitive) matches win over substring matches.
        """
        # Hoisted: the lowercase candidate list is loop-invariant.
        candidates_lower = [c.lower() for c in self.DEPTH_COLUMN_CANDIDATES]

        # Exact match (case-insensitive).
        for col in columns:
            if col.lower() in candidates_lower:
                return col
        # Fuzzy match: column name contains a depth keyword.
        for col in columns:
            if any(keyword in col.lower() for keyword in candidates_lower):
                return col
        return None

    def _preprocess_data(self, data):
        """Sort by depth, drop duplicate depths, drop rows missing depth."""
        if data is None or data.empty:
            return data

        # FIX: all three steps are now guarded — the original ran dropna()
        # unconditionally and raised KeyError when the depth column was
        # absent from the frame.
        if self.depth_column in data.columns:
            data = data.sort_values(by=self.depth_column)
            data = data.drop_duplicates(subset=self.depth_column)
            data = data.dropna(subset=[self.depth_column])

        return data

    def _save_well_data(self, well_name: str) -> bool:
        """Persist one well to ``<project>/<well_name>.csv`` and refresh
        the project metadata. Returns True on success.
        """
        if well_name not in self.well_data:
            return False

        try:
            # Make sure the project directory exists (race-safe).
            os.makedirs(self.project_path, exist_ok=True)

            file_path = os.path.join(self.project_path, f"{well_name}.csv")
            self.well_data[well_name].to_csv(file_path, index=False)

            self._save_project_info()

            return True
        except Exception as e:
            QMessageBox.critical(None, "错误", f"保存井数据失败: {str(e)}")
            return False

    def _save_project_info(self) -> None:
        """Write project metadata (depth column + well list) to
        ``project_info.json``. Failures are logged, not raised.
        """
        if not self.project_path:
            return

        try:
            project_info = {
                "depth_column": self.depth_column,
                "wells": list(self.well_data.keys())
            }

            info_file = os.path.join(self.project_path, "project_info.json")
            with open(info_file, 'w', encoding='utf-8') as f:
                json.dump(project_info, f, ensure_ascii=False, indent=2)
        except Exception as e:
            print(f"保存项目信息失败: {str(e)}")

    def _load_project_data(self) -> None:
        """Load everything from the project directory.

        Prefers ``project_info.json``; if missing, falls back to loading
        every ``*.csv`` file found in the directory.
        """
        if not self.project_path or not os.path.exists(self.project_path):
            return

        info_file = os.path.join(self.project_path, "project_info.json")
        if os.path.exists(info_file):
            try:
                with open(info_file, 'r', encoding='utf-8') as f:
                    project_info = json.load(f)

                # Restore the project-wide depth column BEFORE loading wells,
                # since _load_well_data validates against it.
                if "depth_column" in project_info:
                    self.depth_column = project_info["depth_column"]

                if "wells" in project_info:
                    for well_name in project_info["wells"]:
                        self._load_well_data(well_name)

            except Exception as e:
                print(f"加载项目信息失败: {str(e)}")

        # No metadata file: discover wells from the CSV files themselves.
        else:
            csv_files = [f for f in os.listdir(self.project_path) if f.endswith('.csv')]
            for csv_file in csv_files:
                well_name = os.path.splitext(csv_file)[0]
                self._load_well_data(well_name)

    def _load_well_data(self, well_name: str) -> Optional[pd.DataFrame]:
        """Load one well's CSV from the project directory.

        Returns the DataFrame (also cached in ``well_data``) or None.
        Only wells containing the current depth column are registered.
        """
        file_path = os.path.join(self.project_path, f"{well_name}.csv")
        if not os.path.exists(file_path):
            return None

        try:
            # FIX: decode once, then validate — the original re-read the
            # file under every remaining encoding (and chardet) even after
            # a successful decode, just because the depth column was missing.
            df = None
            for encoding in self.encoding_priority:
                try:
                    df = pd.read_csv(file_path, encoding=encoding)
                    break
                except UnicodeDecodeError:
                    continue

            if df is None:
                detected_encoding = self._detect_file_encoding(file_path)
                try:
                    df = pd.read_csv(file_path, encoding=detected_encoding)
                except Exception as e:
                    print(f"使用chardet检测的编码 {detected_encoding} 加载失败: {e}")

            if df is not None and self.depth_column in df.columns:
                self.well_data[well_name] = df
                return df

            QMessageBox.warning(None, "警告", f"无法加载文件: {file_path}，尝试了所有可用编码")
            return None
        except Exception as e:
            print(f"加载井数据 {well_name} 时出错: {e}")
            return None