# core/data_model.py
import pandas as pd
import os
import json
from PySide6.QtCore import QObject, Signal
from datetime import datetime
import numpy as np

class DataModel(QObject):
    """Data model responsible for loading, storing, and managing well data.

    Each well's measurements are held in memory as a pandas DataFrame keyed
    by well name. A JSON sidecar file per well persists its metadata
    (import time, source file path, row count, columns, basic statistics)
    under ``self.data_dir``.
    """

    # Emitted with the well name whenever a well's data is added or changed.
    data_updated = Signal(str)

    # File extensions recognised as well data files. Used both when scanning
    # the data directory and when resolving a well's on-disk file.
    _DATA_EXTENSIONS = ('.xlsx', '.xls', '.csv', '.txt')

    def __init__(self):
        super().__init__()
        self.data = {}      # {well_name: DataFrame}
        self.metadata = {}  # {well_name: metadata dict}
        self.data_dir = "data"  # directory holding data files + JSON sidecars

        # exist_ok avoids the check-then-create race of the former
        # os.path.exists() + os.makedirs() pair.
        os.makedirs(self.data_dir, exist_ok=True)

        # Restore any wells previously saved in the data directory.
        self._load_existing_data()

    def _load_existing_data(self):
        """Load every recognised data file found in the data directory.

        Best-effort: a single unreadable file is reported and skipped so it
        cannot block startup.
        """
        for file_name in os.listdir(self.data_dir):
            file_path = os.path.join(self.data_dir, file_name)
            if not os.path.isfile(file_path):
                continue
            if os.path.splitext(file_name)[1].lower() not in self._DATA_EXTENSIONS:
                continue
            try:
                # The well is named after the file (without extension).
                well_name = os.path.splitext(file_name)[0]
                self._load_data_from_file(file_path, well_name)
            except Exception as e:
                print(f"加载文件 {file_name} 失败: {str(e)}")

    def _load_data_from_file(self, file_path, well_name):
        """Read *file_path* into a DataFrame and register it under *well_name*.

        Loads the JSON metadata sidecar if present, otherwise creates and
        persists a fresh one.

        Raises:
            ValueError: if the file extension is not supported.
        """
        file_ext = os.path.splitext(file_path)[1].lower()

        if file_ext in ['.xlsx', '.xls']:
            df = pd.read_excel(file_path)
        elif file_ext == '.csv':
            df = pd.read_csv(file_path)
        elif file_ext == '.txt':
            # .txt files are expected to be tab-separated.
            df = pd.read_csv(file_path, sep='\t')
        else:
            raise ValueError(f"不支持的文件类型: {file_ext}")

        self.data[well_name] = df

        # Load existing metadata, or create and persist a new record.
        metadata_path = os.path.join(self.data_dir, f"{well_name}.json")
        if os.path.exists(metadata_path):
            with open(metadata_path, 'r', encoding='utf-8') as f:
                self.metadata[well_name] = json.load(f)
        else:
            self.metadata[well_name] = {
                'well_name': well_name,
                'import_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                'file_path': file_path,
                'rows': len(df),
                'columns': list(df.columns),
                'statistics': self._calculate_basic_statistics(df)
            }
            self._save_metadata(well_name)

    def _save_metadata(self, well_name):
        """Write the well's metadata dict to its JSON sidecar file."""
        if well_name in self.metadata:
            metadata_path = os.path.join(self.data_dir, f"{well_name}.json")
            with open(metadata_path, 'w', encoding='utf-8') as f:
                json.dump(self.metadata[well_name], f, ensure_ascii=False, indent=4)

    def _calculate_basic_statistics(self, df):
        """Return {column: {mean, median, std, min, max, count}} for numeric columns."""
        stats = {}
        numeric_cols = df.select_dtypes(include=[np.number]).columns

        for col in numeric_cols:
            count = int(df[col].count())
            if count == 0:
                # An all-NaN column yields NaN aggregates, which json.dump
                # would serialize as non-standard JSON; skip it.
                continue
            try:
                stats[col] = {
                    'mean': float(df[col].mean()),
                    'median': float(df[col].median()),
                    'std': float(df[col].std()),
                    'min': float(df[col].min()),
                    'max': float(df[col].max()),
                    'count': count
                }
            except (TypeError, ValueError):
                # Narrowed from a bare except: only value/conversion failures
                # are expected here; anything else should surface.
                continue

        return stats

    def get_well_list(self):
        """Return a list of summary dicts (name, rows, columns, import time) per well."""
        return [
            {
                'well_name': well_name,
                'rows': self.metadata.get(well_name, {}).get('rows', 0),
                'columns': self.metadata.get(well_name, {}).get('columns', []),
                'import_time': self.metadata.get(well_name, {}).get('import_time', '')
            }
            for well_name in self.data.keys()
        ]

    def get_curve_names(self, well_name):
        """Return the curve (column) names for *well_name*, excluding the depth column.

        Returns an empty list for an unknown well.
        """
        if well_name in self.data:
            df = self.data[well_name]
            depth_col = self._detect_depth_column(df.columns)
            if depth_col:
                return [col for col in df.columns if col != depth_col]
            return list(df.columns)
        return []

    def get_well_data(self, well_name):
        """Return the DataFrame for *well_name*, or None if unknown."""
        return self.data.get(well_name)

    def import_data(self, file_path, well_name=None):
        """Import a data file, emit ``data_updated``, and return the well name.

        If *well_name* is omitted, the file's base name (without extension)
        is used.
        """
        if not well_name:
            well_name = os.path.splitext(os.path.basename(file_path))[0]

        self._load_data_from_file(file_path, well_name)
        self.data_updated.emit(well_name)
        return well_name

    def export_data(self, well_name, export_path):
        """Export the well's full DataFrame; format is chosen by *export_path* extension.

        Raises:
            ValueError: if the well is unknown or the extension unsupported.
        """
        if well_name not in self.data:
            raise ValueError(f"井 '{well_name}' 不存在")

        df = self.data[well_name]
        file_ext = os.path.splitext(export_path)[1].lower()

        if file_ext == '.xlsx':
            df.to_excel(export_path, index=False)
        elif file_ext == '.csv':
            df.to_csv(export_path, index=False)
        elif file_ext == '.txt':
            df.to_csv(export_path, index=False, sep='\t')
        else:
            raise ValueError(f"不支持的导出文件类型: {file_ext}")

    def export_curve(self, well_name, curve_name, export_path):
        """Export one curve (plus the depth column, when detected) as CSV.

        Raises:
            ValueError: if the well or the curve does not exist.
        """
        if well_name not in self.data:
            raise ValueError(f"井 '{well_name}' 不存在")

        df = self.data[well_name]

        if curve_name not in df.columns:
            raise ValueError(f"曲线 '{curve_name}' 在井 '{well_name}' 中不存在")

        depth_col = self._detect_depth_column(df.columns)
        if not depth_col:
            # No depth column found: export the curve on its own.
            df[[curve_name]].to_csv(export_path, index=False)
        else:
            # Export depth and curve side by side.
            df[[depth_col, curve_name]].to_csv(export_path, index=False)

    def _find_data_file(self, well_name):
        """Return the on-disk data file path for *well_name*, or None.

        Prefers the path recorded in metadata; falls back to probing the
        known extensions inside the data directory.
        """
        recorded = self.metadata.get(well_name, {}).get('file_path')
        if recorded and os.path.exists(recorded):
            return recorded
        for ext in self._DATA_EXTENSIONS:
            candidate = os.path.join(self.data_dir, f"{well_name}{ext}")
            if os.path.exists(candidate):
                return candidate
        return None

    def rename_well(self, old_name, new_name):
        """Rename a well in memory and on disk, then emit ``data_updated``.

        Raises:
            ValueError: if *old_name* is unknown or *new_name* already exists.
        """
        if old_name not in self.data:
            raise ValueError(f"井 '{old_name}' 不存在")

        if new_name in self.data:
            raise ValueError(f"井 '{new_name}' 已存在")

        # Resolve the data file before the metadata entry moves keys.
        old_data_path = self._find_data_file(old_name)

        self.data[new_name] = self.data.pop(old_name)
        self.metadata[new_name] = self.metadata.pop(old_name)
        self.metadata[new_name]['well_name'] = new_name

        old_metadata_path = os.path.join(self.data_dir, f"{old_name}.json")
        new_metadata_path = os.path.join(self.data_dir, f"{new_name}.json")

        if old_data_path:
            # BUG FIX: the original hard-coded a ".xlsx" path, leaving
            # .csv/.txt/.xls files behind under the old name (which would
            # then be re-imported as a stale well on the next startup).
            # Preserve whatever extension the file actually has.
            ext = os.path.splitext(old_data_path)[1]
            new_data_path = os.path.join(self.data_dir, f"{new_name}{ext}")
            os.rename(old_data_path, new_data_path)
            self.metadata[new_name]['file_path'] = new_data_path
        else:
            # No file on disk: still keep the recorded path consistent with
            # the new name so a later save/export is coherent.
            self.metadata[new_name]['file_path'] = os.path.join(
                self.data_dir, f"{new_name}.xlsx")

        if os.path.exists(old_metadata_path):
            os.rename(old_metadata_path, new_metadata_path)

        self._save_metadata(new_name)
        self.data_updated.emit(new_name)

    def delete_well(self, well_name):
        """Remove a well from memory and delete its data + metadata files.

        Raises:
            ValueError: if the well does not exist.
        """
        if well_name not in self.data:
            raise ValueError(f"井 '{well_name}' 不存在")

        # Resolve the data file before the metadata entry is dropped.
        # BUG FIX: the original only removed a hard-coded ".xlsx" file, so
        # wells imported from .csv/.txt/.xls left their data file behind.
        data_path = self._find_data_file(well_name)

        del self.data[well_name]
        self.metadata.pop(well_name, None)

        if data_path and os.path.exists(data_path):
            os.remove(data_path)

        metadata_path = os.path.join(self.data_dir, f"{well_name}.json")
        if os.path.exists(metadata_path):
            os.remove(metadata_path)

    def _detect_depth_column(self, columns):
        """Return the first recognised depth column name in *columns*, or None.

        Recognised names: 斜深 (measured depth, Chinese), 井深 (well depth,
        Chinese), TVD, MD — checked in that priority order.
        """
        for col in ['斜深', '井深', 'TVD', 'MD']:
            if col in columns:
                return col
        return None