import json
import shutil
from datetime import datetime
from pathlib import Path
from typing import Optional, Dict, Any

import pandas as pd

from .utils.config import ensure_data_dirs, get_settings
from .utils.logger import get_logger


class DataStorage:
    """Local CSV storage manager for per-symbol stock data.

    One CSV per (symbol, interval) pair is kept under
    <data_dir>/stocks/<daily|hourly|minute>/, plus optional JSON metadata
    sidecars under <data_dir>/stocks/metadata/.
    """

    def __init__(self) -> None:
        # ensure_data_dirs() presumably creates the project data directories
        # and returns a path mapping -- TODO confirm the returned keys beyond
        # "data_dir", which is the only one used here.
        self.dirs = ensure_data_dirs()
        self.settings = get_settings()
        self.logger = get_logger("DataStorage")
        # Root directory for all per-symbol stock CSV files.
        self.data_dir = self.dirs["data_dir"] / "stocks"
        
    def _get_file_path(self, symbol: str, interval: str = '1d') -> Path:
        """获取数据文件路径"""
        interval_dir_map = {
            '1d': 'daily',
            '1h': 'hourly',
            '30m': 'minute',
            '15m': 'minute',
            '5m': 'minute'
        }
        
        subdir = interval_dir_map.get(interval, 'daily')
        return self.data_dir / subdir / f"{symbol}_{interval}.csv"
        
    def save_data(self, symbol: str, data: pd.DataFrame, interval: str = '1d', stock_name: str = None) -> bool:
        """
        保存股票数据到CSV文件

        Args:
            symbol: 股票代码
            data: 股票数据
            interval: 数据频率
            stock_name: 股票名称（可选）

        Returns:
            保存是否成功
        """
        try:
            if data.empty:
                self.logger.warning(f"Empty data for {symbol}, skipping save")
                return False

            file_path = self._get_file_path(symbol, interval)
            file_path.parent.mkdir(parents=True, exist_ok=True)

            # 确保Date列是索引
            if 'Date' in data.columns:
                data = data.set_index('Date')
            elif data.index.name != 'Date':
                data.index.name = 'Date'

            # 添加股票名称信息（如果有）
            if stock_name and 'name' not in data.columns:
                data['name'] = stock_name

            # 排序并去重
            data = data.sort_index().drop_duplicates()

            # 保存到CSV
            data.to_csv(file_path, encoding='utf-8')

            # 创建元数据文件
            if stock_name:
                self._save_metadata(symbol, interval, stock_name)

            self.logger.info(f"Saved {len(data)} records for {symbol}_{interval} to {file_path}")
            return True

        except Exception as e:
            self.logger.error(f"Failed to save data for {symbol}_{interval}: {e}")
            return False

    def _save_metadata(self, symbol: str, interval: str, stock_name: str):
        """保存股票元数据"""
        try:
            metadata_path = self.data_dir / "metadata" / f"{symbol}_{interval}_meta.json"
            metadata_path.parent.mkdir(parents=True, exist_ok=True)

            metadata = {
                "symbol": symbol,
                "name": stock_name,
                "interval": interval,
                "updated_at": datetime.now().isoformat()
            }

            import json
            with open(metadata_path, 'w', encoding='utf-8') as f:
                json.dump(metadata, f, ensure_ascii=False, indent=2)

        except Exception as e:
            self.logger.warning(f"Failed to save metadata for {symbol}_{interval}: {e}")

    def get_stock_name(self, symbol: str, interval: str = '1d') -> str:
        """获取股票名称"""
        try:
            metadata_path = self.data_dir / "metadata" / f"{symbol}_{interval}_meta.json"
            if metadata_path.exists():
                import json
                with open(metadata_path, 'r', encoding='utf-8') as f:
                    metadata = json.load(f)
                    return metadata.get("name", symbol)
            return symbol
        except Exception as e:
            self.logger.warning(f"Failed to get stock name for {symbol}_{interval}: {e}")
            return symbol

    def load_data(self, symbol: str, interval: str = '1d') -> pd.DataFrame:
        """加载本地股票数据"""
        try:
            file_path = self._get_file_path(symbol, interval)
            
            if not file_path.exists():
                self.logger.info(f"No data file found for {symbol}_{interval}")
                return pd.DataFrame()
                
            data = pd.read_csv(file_path, index_col=0, parse_dates=True)
            data.index.name = 'Date'
            
            self.logger.info(f"Loaded {len(data)} records for {symbol}_{interval}")
            return data
            
        except Exception as e:
            self.logger.error(f"Failed to load data for {symbol}_{interval}: {e}")
            return pd.DataFrame()
            
    def merge_data(self, symbol: str, new_data: pd.DataFrame, interval: str = '1d') -> pd.DataFrame:
        """
        合并新数据到现有数据
        自动去重和排序
        """
        try:
            existing_data = self.load_data(symbol, interval)
            
            if existing_data.empty:
                merged_data = new_data.copy()
            else:
                # 确保两个DataFrame都有Date索引
                if 'Date' in new_data.columns:
                    new_data = new_data.set_index('Date')
                elif new_data.index.name != 'Date':
                    new_data.index.name = 'Date'
                    
                # 合并数据
                merged_data = pd.concat([existing_data, new_data])
                
            # 去重并排序
            merged_data = merged_data.sort_index().drop_duplicates()
            
            # 保存合并后的数据
            self.save_data(symbol, merged_data, interval)
            
            self.logger.info(f"Merged data for {symbol}_{interval}: {len(merged_data)} total records")
            return merged_data
            
        except Exception as e:
            self.logger.error(f"Failed to merge data for {symbol}_{interval}: {e}")
            return pd.DataFrame()
            
    def get_last_date(self, symbol: str, interval: str = '1d') -> Optional[str]:
        """获取本地数据的最后更新日期"""
        try:
            data = self.load_data(symbol, interval)
            
            if data.empty:
                return None
                
            last_date = data.index.max().strftime('%Y-%m-%d')
            self.logger.debug(f"Last date for {symbol}_{interval}: {last_date}")
            return last_date
            
        except Exception as e:
            self.logger.error(f"Failed to get last date for {symbol}_{interval}: {e}")
            return None
            
    def backup_data(self, symbol: str, interval: str = '1d') -> bool:
        """备份数据文件"""
        try:
            file_path = self._get_file_path(symbol, interval)
            
            if not file_path.exists():
                return False
                
            # 创建备份目录
            backup_dir = self.dirs["data_dir"] / "backups" / datetime.now().strftime('%Y-%m-%d')
            backup_dir.mkdir(parents=True, exist_ok=True)
            
            # 复制文件
            backup_path = backup_dir / file_path.name
            shutil.copy2(file_path, backup_path)
            
            self.logger.info(f"Backed up {symbol}_{interval} to {backup_path}")
            return True
            
        except Exception as e:
            self.logger.error(f"Failed to backup data for {symbol}_{interval}: {e}")
            return False
            
    def get_data_info(self, symbol: str, interval: str = '1d') -> Dict[str, Any]:
        """
        获取数据文件信息
        
        Returns:
            {
                'file_exists': bool,
                'last_date': str,
                'record_count': int,
                'file_size': str,
                'data_range': tuple
            }
        """
        try:
            file_path = self._get_file_path(symbol, interval)
            
            info = {
                'file_exists': file_path.exists(),
                'last_date': None,
                'record_count': 0,
                'file_size': '0 B',
                'data_range': None
            }
            
            if not file_path.exists():
                return info
                
            # 文件大小
            size_bytes = file_path.stat().st_size
            if size_bytes < 1024:
                info['file_size'] = f"{size_bytes} B"
            elif size_bytes < 1024 * 1024:
                info['file_size'] = f"{size_bytes / 1024:.1f} KB"
            else:
                info['file_size'] = f"{size_bytes / (1024 * 1024):.1f} MB"
                
            # 数据信息
            data = self.load_data(symbol, interval)
            if not data.empty:
                info['record_count'] = len(data)
                info['last_date'] = data.index.max().strftime('%Y-%m-%d')
                info['data_range'] = (data.index.min().strftime('%Y-%m-%d'), 
                                    data.index.max().strftime('%Y-%m-%d'))
                
            return info
            
        except Exception as e:
            self.logger.error(f"Failed to get data info for {symbol}_{interval}: {e}")
            return {'file_exists': False, 'last_date': None, 'record_count': 0, 
                   'file_size': '0 B', 'data_range': None}
                   
    def clean_duplicate_data(self, symbol: str, interval: str = '1d') -> int:
        """清理重复数据，返回清理的记录数"""
        try:
            data = self.load_data(symbol, interval)
            
            if data.empty:
                return 0
                
            original_count = len(data)
            data_cleaned = data.drop_duplicates()
            cleaned_count = len(data_cleaned)
            
            duplicates_removed = original_count - cleaned_count
            
            if duplicates_removed > 0:
                self.save_data(symbol, data_cleaned, interval)
                self.logger.info(f"Cleaned {duplicates_removed} duplicate records for {symbol}_{interval}")
                
            return duplicates_removed
            
        except Exception as e:
            self.logger.error(f"Failed to clean duplicates for {symbol}_{interval}: {e}")
            return 0