#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
文件加载处理模块
负责CSV和Parquet文件的解析和加载
"""

import hashlib
import json
import logging
import os
import time
from pathlib import Path
from typing import Dict, List, Tuple, Optional, Union, Any

import duckdb
import pyarrow as pa
import pyarrow.csv as csv
import pyarrow.parquet as pq
from PySide6.QtCore import QObject, Signal

from core.db_engine import get_db_engine
from config.constants import CSV_DELIMITERS, DEFAULT_ENCODINGS, FILE_TYPES, CONFIG_DIR
from config.settings import settings


# Module-level logger for file-loading diagnostics
logger = logging.getLogger("file_loader")


class FileLoadError(Exception):
    """Raised when a file cannot be parsed or loaded into the database."""


class FileLoader(QObject):
    """File loader: parses CSV/Parquet files and loads them into the database.

    Emits Qt signals so the UI can observe load progress, completion and
    previews, and persists a JSON-backed history of successfully loaded files.
    """

    # Signal definitions
    loading_progress = Signal(int, int)  # current progress, total progress
    loading_completed = Signal(bool, str)  # success flag, message
    file_preview_ready = Signal(object)  # preview data (DataFrame)

    def __init__(self):
        """Initialize the loader and restore any persisted file history."""
        super().__init__()
        self.db_engine = get_db_engine()
        self.file_history_path = CONFIG_DIR / "file_history.json"
        self.file_history = self._load_file_history()

    @staticmethod
    def _file_id(file_path: str) -> str:
        """Return a run-stable identifier for a file path.

        Uses an MD5 digest (non-cryptographic use) instead of the builtin
        ``hash()``, which is salted per interpreter process and would
        therefore produce a different history key for the same file on
        every run, duplicating entries in file_history.json.
        """
        return hashlib.md5(file_path.encode("utf-8")).hexdigest()

    def _load_file_history(self) -> Dict[str, Dict[str, Any]]:
        """Load the file-history mapping from disk.

        Returns:
            Mapping of file id -> metadata; an empty dict when the history
            file is missing or unreadable (best effort, never fatal).
        """
        if not self.file_history_path.exists():
            return {}

        try:
            with open(self.file_history_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            # A corrupt history file should not break startup; start fresh.
            logger.error(f"加载文件历史记录失败: {str(e)}")
            return {}

    def _save_file_history(self) -> None:
        """Persist the file history to disk (best effort, errors logged)."""
        try:
            with open(self.file_history_path, 'w', encoding='utf-8') as f:
                json.dump(self.file_history, f, ensure_ascii=False, indent=2)
        except Exception as e:
            logger.error(f"保存文件历史记录失败: {str(e)}")

    def is_supported_file(self, file_path: str) -> Tuple[bool, str]:
        """Check whether a file is of a supported type.

        Args:
            file_path: Path of the file to check.

        Returns:
            (supported flag, file type).  On failure the second element is
            an error message rather than a type name.
        """
        path = Path(file_path)
        if not path.exists():
            return False, "文件不存在"

        suffix = path.suffix.lower()

        for file_type, info in FILE_TYPES.items():
            if suffix in info["extensions"]:
                return True, file_type

        return False, f"不支持的文件类型: {suffix}"

    def detect_csv_delimiter(self, file_path: str, sample_size: int = 1000) -> str:
        """Detect the delimiter of a CSV file.

        Picks the candidate from CSV_DELIMITERS that occurs most often in
        the first ``sample_size`` lines of the file.

        Args:
            file_path: Path of the CSV file.
            sample_size: Maximum number of lines to sample.

        Returns:
            The detected delimiter, or ',' on any error.
        """
        try:
            best_delimiter = ","
            best_count = 0

            # Read the head of the file for detection.  (Previously the
            # sample was silently capped at 100 lines, ignoring larger
            # sample_size values; honour the parameter instead.)
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                sample = ''.join(f.readline() for _ in range(sample_size))

            # The delimiter with the most occurrences in the sample wins.
            for delimiter in CSV_DELIMITERS:
                count = sample.count(delimiter)
                if count > best_count:
                    best_count = count
                    best_delimiter = delimiter

            return best_delimiter
        except Exception as e:
            logger.warning(f"检测CSV分隔符失败，使用默认分隔符',': {str(e)}")
            return ","

    def detect_encoding(self, file_path: str) -> str:
        """Detect the text encoding of a file.

        Tries each candidate in DEFAULT_ENCODINGS by decoding the first
        1 KiB of the file.

        Args:
            file_path: Path of the file.

        Returns:
            The first encoding that decodes cleanly, or "UTF-8" as fallback.
        """
        for encoding in DEFAULT_ENCODINGS:
            try:
                with open(file_path, 'r', encoding=encoding) as f:
                    f.read(1024)
                return encoding
            except UnicodeDecodeError:
                continue

        # No candidate decoded cleanly; fall back to the default encoding.
        return "UTF-8"

    def load_file_to_db(self, file_path: str,
                       table_name: Optional[str] = None,
                       delimiter: Optional[str] = None,
                       encoding: Optional[str] = None,
                       header: Optional[bool] = None,
                       preview_rows: Optional[int] = None,
                       **kwargs) -> Tuple[bool, str]:
        """Load a file into the database.

        Args:
            file_path: Path of the file to load.
            table_name: Target table name; derived from the file name if None.
            delimiter: CSV delimiter; auto-detected if None.
            encoding: File encoding; auto-detected if None.
            header: Whether the file has a header row; defaults to True.
            preview_rows: Preview row count (accepted for interface
                compatibility; not used when loading).
            kwargs: Extra options (currently ignored).

        Returns:
            (success flag, message)
        """
        try:
            path = Path(file_path)
            if not path.exists():
                return False, f"文件不存在: {file_path}"

            # Validate the file type.
            supported, file_type = self.is_supported_file(file_path)
            if not supported:
                return False, file_type  # error message

            # Derive a SQL-friendly table name from the file stem.
            if not table_name:
                table_name = path.stem.replace(' ', '_').replace('-', '_')

            # Assemble loader options.
            options = {}

            # CSV-specific handling: fill in auto-detected defaults.
            if file_type == "CSV":
                if not delimiter:
                    delimiter = self.detect_csv_delimiter(file_path)
                if not encoding:
                    encoding = self.detect_encoding(file_path)
                if header is None:
                    header = True

                options["delimiter"] = delimiter
                options["encoding"] = encoding
                options["header"] = header

            # Hand off to the database engine.
            success, message = self.db_engine.load_file(
                file_path,
                table_name=table_name,
                file_type=file_type.lower(),
                **options
            )

            if success:
                # Record in history under a run-stable id (see _file_id);
                # builtin hash() would change between processes.
                file_id = self._file_id(file_path)
                self.file_history[file_id] = {
                    "file_path": file_path,
                    "file_type": file_type,
                    "table_name": table_name,
                    "last_accessed": time.time(),
                    "options": options
                }
                self._save_file_history()

                # Track in the recent-files list.
                settings.add_recent_file(file_path)
                settings.save_settings()

            return success, message

        except Exception as e:
            logger.error(f"加载文件失败: {str(e)}")
            return False, f"加载文件失败: {str(e)}"

    def get_file_preview(self, file_path: str,
                         max_rows: int = 100,
                         delimiter: Optional[str] = None,
                         encoding: Optional[str] = None,
                         header: Optional[bool] = None) -> Tuple[bool, Union[object, str]]:
        """Build a preview of a file's contents.

        Args:
            file_path: Path of the file.
            max_rows: Maximum number of preview rows.
            delimiter: CSV delimiter; auto-detected if None.
            encoding: File encoding; auto-detected if None.
            header: Whether the file has a header row; defaults to True.

        Returns:
            (success flag, preview DataFrame or error message)
        """
        try:
            path = Path(file_path)
            if not path.exists():
                return False, f"文件不存在: {file_path}"

            # Validate the file type.
            supported, file_type = self.is_supported_file(file_path)
            if not supported:
                return False, file_type  # error message

            # Never interpolate untrusted values into SQL unescaped:
            # force max_rows to an int and escape single quotes in the
            # string values below.
            max_rows = int(max_rows)

            # Temporary in-memory DuckDB connection; closed in `finally`
            # so it is not leaked when a query raises.
            conn = duckdb.connect(":memory:")
            try:
                if file_type == "CSV":
                    if not delimiter:
                        delimiter = self.detect_csv_delimiter(file_path)
                    if not encoding:
                        encoding = self.detect_encoding(file_path)
                    if header is None:
                        header = True

                    safe_path = file_path.replace("'", "''")
                    safe_delim = delimiter.replace("'", "''")
                    header_option = 'true' if header else 'false'
                    preview_data = conn.execute(f"""
                        SELECT * FROM read_csv_auto(
                            '{safe_path}',
                            delim='{safe_delim}',
                            header={header_option},
                            sample_size={max_rows}
                        ) LIMIT {max_rows}
                    """).fetch_df()

                elif file_type == "PARQUET":
                    safe_path = file_path.replace("'", "''")
                    preview_data = conn.execute(f"""
                        SELECT * FROM read_parquet('{safe_path}') LIMIT {max_rows}
                    """).fetch_df()

                else:
                    # Previously fell through and raised NameError on
                    # preview_data; report a clear error instead.
                    return False, f"不支持预览的文件类型: {file_type}"
            finally:
                conn.close()

            # Notify listeners that a preview is available.
            self.file_preview_ready.emit(preview_data)

            return True, preview_data

        except Exception as e:
            logger.error(f"获取文件预览失败: {str(e)}")
            return False, f"获取文件预览失败: {str(e)}"

    def get_file_history(self) -> List[Dict[str, Any]]:
        """Return the file history, most recently accessed first.

        Entries whose backing file no longer exists on disk are skipped.

        Returns:
            List of history record dicts.
        """
        result = []
        for file_id, info in self.file_history.items():
            # Skip entries whose backing file has been removed.
            if not os.path.exists(info["file_path"]):
                continue

            result.append({
                "file_id": file_id,
                "file_path": info["file_path"],
                "file_type": info["file_type"],
                "table_name": info["table_name"],
                "last_accessed": info["last_accessed"]
            })

        # Most recently accessed first.
        result.sort(key=lambda x: x["last_accessed"], reverse=True)
        return result

    def clean_expired_history(self, days: int = 7) -> int:
        """Remove history entries older than ``days`` days.

        Args:
            days: Expiry age in days.

        Returns:
            Number of records removed.
        """
        if not self.file_history:
            return 0

        # Cutoff timestamp for expiry.
        cutoff_time = time.time() - (days * 24 * 60 * 60)

        # Collect expired ids first; never mutate a dict while iterating it.
        expired_ids = [
            file_id for file_id, info in self.file_history.items()
            if info["last_accessed"] < cutoff_time
        ]

        for file_id in expired_ids:
            del self.file_history[file_id]

        # Persist only when something actually changed.
        if expired_ids:
            self._save_file_history()

        return len(expired_ids)

    def load_file(self, file_path: str, table_name: str, options: Dict[str, Any]) -> None:
        """Simplified load entry point, compatible with main-window calls.

        Emits progress and completion signals around load_file_to_db.

        Args:
            file_path: Path of the file to load.
            table_name: Target table name.
            options: Loader options forwarded to load_file_to_db.
        """
        # Signal that loading has started.
        self.loading_progress.emit(0, 100)

        # Delegate to the real loading routine.
        success, message = self.load_file_to_db(
            file_path=file_path,
            table_name=table_name,
            **options
        )

        # Report the outcome, then mark progress complete.
        self.loading_completed.emit(success, message)
        self.loading_progress.emit(100, 100)


# Lazily-created module-level singleton
_file_loader_instance = None


def get_file_loader() -> FileLoader:
    """Return the shared FileLoader, creating it on first use.

    Returns:
        The process-wide FileLoader instance.
    """
    global _file_loader_instance
    instance = _file_loader_instance
    if instance is None:
        instance = _file_loader_instance = FileLoader()
    return instance
