"""
K线数据缓存服务
实现本地CSV缓存，支持增量数据追加和查询
"""

import os
import csv
import pandas as pd
from datetime import datetime, timedelta
from typing import List, Dict, Optional
from ..config import config


class KlineCacheService:
    """Local CSV cache for kline (candlestick) data.

    Each symbol (optionally scoped to a time range) maps to one CSV file
    under ``<project_root>/data/kline_cache``. Appends are deduplicated on
    the ``open_time`` column (milliseconds since epoch) and kept sorted.
    """

    def __init__(self) -> None:
        """Initialize the cache service, creating the cache directory if needed."""
        # Project root is three directory levels above this module file.
        base_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
        self.cache_dir = os.path.join(base_dir, 'data', 'kline_cache')
        os.makedirs(self.cache_dir, exist_ok=True)

    def get_cache_key(self, symbol: str, start_time: Optional[datetime] = None, end_time: Optional[datetime] = None) -> str:
        """
        Build the cache file name (v0.1.9: supports minute-level time naming).

        Args:
            symbol: Trading pair symbol.
            start_time: Range start; used in the file name only if both bounds are given.
            end_time: Range end.

        Returns:
            Cache file name, e.g. ``btcusdt_20240101_0900_20240101_1200.csv``
            for a ranged cache, or ``btcusdt.csv`` for the symbol-wide cache.
        """
        if start_time and end_time:
            # v0.1.9: include hour/minute in the name so intra-day ranges
            # produce distinct cache files.
            start_str = start_time.strftime('%Y%m%d_%H%M')
            end_str = end_time.strftime('%Y%m%d_%H%M')
            return f"{symbol.lower()}_{start_str}_{end_str}.csv"
        else:
            return f"{symbol.lower()}.csv"

    def get_cache_path(self, symbol: str, start_time: Optional[datetime] = None, end_time: Optional[datetime] = None) -> str:
        """Return the absolute path of the cache file for the given symbol/range."""
        cache_key = self.get_cache_key(symbol, start_time, end_time)
        return os.path.join(self.cache_dir, cache_key)

    def read_cache(self, symbol: str, start_time: Optional[datetime] = None, end_time: Optional[datetime] = None) -> pd.DataFrame:
        """
        Read cached data.

        Args:
            symbol: Trading pair symbol.
            start_time: Range start (selects the ranged cache file when set with end_time).
            end_time: Range end.

        Returns:
            DataFrame of cached kline data; empty DataFrame if the file is
            missing or unreadable (read errors are logged, not raised).
        """
        cache_path = self.get_cache_path(symbol, start_time, end_time)

        if not os.path.exists(cache_path):
            return pd.DataFrame()

        try:
            # First CSV column is the DataFrame index written by to_csv().
            df = pd.read_csv(cache_path, index_col=0)
            return df
        except Exception as e:
            print(f"读取缓存文件失败: {e}")
            return pd.DataFrame()

    def write_cache(self, symbol: str, data: List[Dict], start_time: Optional[datetime] = None, end_time: Optional[datetime] = None, append: bool = True) -> None:
        """
        Write data to the cache.

        Args:
            symbol: Trading pair symbol.
            data: List of kline records (each must contain ``open_time``).
            start_time: Range start (selects the ranged cache file when set with end_time).
            end_time: Range end.
            append: When True and a cache file exists, merge with the existing
                data, deduplicating on ``open_time`` (new records win).
        """
        if not data:
            return

        cache_path = self.get_cache_path(symbol, start_time, end_time)

        # Ensure the directory exists (defensive; __init__ normally creates it).
        os.makedirs(os.path.dirname(cache_path), exist_ok=True)

        df_new = pd.DataFrame(data)

        if append and os.path.exists(cache_path):
            # Append mode: read existing data, merge, and deduplicate.
            try:
                df_existing = pd.read_csv(cache_path, index_col=0)
                # Dedupe on the open timestamp; keep='last' lets fresh data
                # overwrite stale rows for the same candle. ignore_index /
                # reset_index keep the persisted index a clean RangeIndex so
                # repeated appends don't accumulate duplicate index labels.
                df_combined = pd.concat([df_existing, df_new], ignore_index=True).drop_duplicates(
                    subset=['open_time'], keep='last'
                ).sort_values('open_time').reset_index(drop=True)
                df_combined.to_csv(cache_path)
            except Exception as e:
                print(f"读取现有缓存失败，创建新文件: {e}")
                df_new.to_csv(cache_path)
        else:
            # Create/overwrite mode.
            df_new.to_csv(cache_path)

    def get_latest_timestamp(self, symbol: str) -> Optional[int]:
        """
        Get the newest timestamp in the symbol-wide cache.

        Args:
            symbol: Trading pair symbol.

        Returns:
            Latest ``open_time`` in milliseconds, or None if no data is cached.
        """
        df = self.read_cache(symbol)
        if df.empty:
            return None

        try:
            return int(df['open_time'].max())
        except Exception as e:
            print(f"获取最新时间戳失败: {e}")
            return None

    def get_cached_data_in_range(
        self,
        symbol: str,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        limit_rows: Optional[int] = None
    ) -> List[Dict]:
        """
        Get cached data within a time range (supports v0.1.4 row-by-row analysis).

        Args:
            symbol: Trading pair symbol.
            start_time: Inclusive lower bound on ``open_time``.
            end_time: Inclusive upper bound on ``open_time``.
            limit_rows: Maximum number of rows to return (for row-by-row
                analysis). ``0`` returns an empty list; ``None`` means no limit.

        Returns:
            List of kline records within the range; empty list on error.
        """
        df = self.read_cache(symbol, start_time, end_time)
        if df.empty:
            return []

        try:
            # Filter by millisecond timestamps derived from the datetimes.
            if start_time:
                df = df[df['open_time'] >= int(start_time.timestamp() * 1000)]
            if end_time:
                df = df[df['open_time'] <= int(end_time.timestamp() * 1000)]

            # v0.1.4: optionally cap the row count. Use an explicit None check
            # so limit_rows=0 means "zero rows" rather than "no limit".
            if limit_rows is not None:
                df = df.head(limit_rows)

            return df.to_dict('records')
        except Exception as e:
            print(f"查询缓存数据失败: {e}")
            return []

    def clear_cache(self, symbol: str, start_time: Optional[datetime] = None, end_time: Optional[datetime] = None) -> bool:
        """
        Delete the cache file for a symbol (and optional time range).

        Args:
            symbol: Trading pair symbol.
            start_time: Range start (selects the ranged cache file when set with end_time).
            end_time: Range end.

        Returns:
            True if a file was removed; False if it did not exist or removal failed.
        """
        cache_path = self.get_cache_path(symbol, start_time, end_time)
        try:
            if os.path.exists(cache_path):
                os.remove(cache_path)
                return True
        except Exception as e:
            print(f"清理缓存失败: {e}")
        return False

    def get_cache_info(self, symbol: str, start_time: Optional[datetime] = None, end_time: Optional[datetime] = None) -> Dict:
        """
        Collect metadata about a cache file.

        Args:
            symbol: Trading pair symbol.
            start_time: Range start (selects the ranged cache file when set with end_time).
            end_time: Range end.

        Returns:
            Dict with ``symbol``, ``cache_file``, ``exists``, ``record_count``,
            ``file_size`` (bytes), and — when data is present — ``start_time``
            / ``end_time`` as local datetimes derived from ``open_time``.
        """
        cache_path = self.get_cache_path(symbol, start_time, end_time)
        df = self.read_cache(symbol, start_time, end_time)

        info = {
            'symbol': symbol,
            'cache_file': cache_path,
            'exists': os.path.exists(cache_path),
            'record_count': len(df),
            'file_size': 0
        }

        if os.path.exists(cache_path):
            info['file_size'] = os.path.getsize(cache_path)

        if not df.empty:
            try:
                # open_time is stored in milliseconds; fromtimestamp expects seconds.
                info['start_time'] = datetime.fromtimestamp(df['open_time'].min() / 1000)
                info['end_time'] = datetime.fromtimestamp(df['open_time'].max() / 1000)
            except Exception:
                # Malformed open_time column: return info without range bounds.
                pass

        return info
