import os
import time
from pathlib import Path
from datetime import datetime as dt, timedelta
from typing import Tuple, List, Optional

import yfinance as yf
import pandas as pd
import numpy as np

# Proxy settings so yfinance can reach Yahoo Finance.
# NOTE(review): hard-coded LAN proxy address — confirm this is valid for
# every environment this script runs in.
os.environ["HTTP_PROXY"] = "http://192.168.1.128:7890"
os.environ["HTTPS_PROXY"] = "http://192.168.1.128:7890"

# Global cache directory for downloaded price CSVs (created at import time).
CACHE_DIR = Path("./cache")
CACHE_DIR.mkdir(exist_ok=True)

# Maximum number of download attempts per segment.
MAX_RETRY = 3

def get_default_start_date(end_date: str) -> str:
    """Return the default start date, 90 calendar days before *end_date*.

    A 90-day lookback is wide enough to cover roughly 60 trading days.
    """
    lookback = timedelta(days=90)
    default_start = pd.to_datetime(end_date) - lookback
    return default_start.strftime("%Y-%m-%d")

def load_cache_if_ok(ticker: str, start: str, end: str, csv_file: Path) -> Optional[Tuple[pd.DataFrame, str, str]]:
    """Load a cached price CSV if it exists and is usable.

    Returns ``(df, cache_start, cache_end)`` where the dates are the actual
    index range of the cached frame (``%Y-%m-%d``), or ``None`` when the
    file is missing, unreadable, empty, or holds fewer than 60 rows.

    The frame is returned even when it does not fully cover
    ``[start, end]`` — callers must check coverage themselves; full
    coverage only controls the "[INF] 使用缓存" log line.  ``ticker`` is
    unused here and kept for interface symmetry with the other cache
    helpers.
    """
    if not csv_file.exists():
        print(f"[DBG] 缓存文件 {csv_file} 不存在")
        return None
    try:
        # Caches are written by DataFrame.to_csv with two header rows
        # (yfinance-style MultiIndex columns), hence header=[0, 1].
        df = pd.read_csv(csv_file, header=[0, 1], index_col=0, parse_dates=True)
        if df.empty or len(df) < 60:
            print(f"[DBG] 缓存 {csv_file} 为空或数据不足 60 天")
            return None
        # Assumes the cached index is sorted ascending (caches are written
        # sorted by the merge/download paths).
        cache_start = df.index[0].strftime("%Y-%m-%d")
        cache_end = df.index[-1].strftime("%Y-%m-%d")
        # Original code duplicated the same return in both branches of this
        # condition; the condition only decides whether to log cache usage.
        if df.index[0] <= pd.to_datetime(start) and df.index[-1] >= pd.to_datetime(end):
            print(f"[INF] 使用缓存: {csv_file}")
        return df, cache_start, cache_end
    except Exception as e:
        print(f"[WARN] 读取缓存 {csv_file} 失败: {e}, 文件内容可能损坏")
        return None

def get_existing_cache_ranges(ticker: str) -> List[Tuple[str, str, Path]]:
    """Scan CACHE_DIR for this ticker's cache files and return their date spans.

    Filenames follow ``{ticker}_{start}_{end}.csv``.  Returns
    ``(start, end, path)`` tuples sorted by start date (ISO date strings
    sort chronologically).  Files whose names do not parse are skipped.
    """
    found: List[Tuple[str, str, Path]] = []
    for path in CACHE_DIR.glob(f"{ticker}_*.csv"):
        pieces = path.stem.split("_")
        if len(pieces) < 3:
            continue
        span_start, span_end = pieces[-2], pieces[-1]
        try:
            # Validate that both filename parts are real dates.
            pd.to_datetime(span_start)
            pd.to_datetime(span_end)
        except Exception:
            print(f"[WARN] 解析缓存文件 {path} 失败")
            continue
        found.append((span_start, span_end, path))
        print(f"[DBG] 找到缓存范围: {span_start} 至 {span_end} -> {path}")
    found.sort(key=lambda item: item[0])
    return found

# 程序中认为的时间段是 [start, end]，对于 yf.download 来说是：[start, end)
def download_segment(ticker: str, start: str, end: str) -> pd.DataFrame:
    """下载指定时间段的数据"""
    for i in range(1, MAX_RETRY + 1):
        try:
            print(f"[INF] 尝试下载 {ticker} {start} 至 {end} (第 {i}/{MAX_RETRY} 次)")
            df = yf.download(
                ticker,
                start=start,
                end=(pd.to_datetime(end) + pd.Timedelta(days=1)).strftime("%Y-%m-%d"),
                auto_adjust=True,
                threads=False,
                progress=False,
                group_by="column",
            )
            if not df.empty and len(df) > 0:
                print(f"[INF] 成功下载 {ticker} {start} 至 {end}")
                return df
            print(f"[WARN] 第 {i}/{MAX_RETRY} 次下载 {ticker} {start} 至 {end} 为空")
        except Exception as e:
            print(f"[WARN] 第 {i}/{MAX_RETRY} 次下载 {ticker} {start} 至 {end} 失败: {e}")
        time.sleep(2)
    raise RuntimeError(f"无法获取 {ticker} 数据 {start} 至 {end}")

def merge_cache_files(ticker: str, new_df: pd.DataFrame, new_start: str, new_end: str) -> pd.DataFrame:
    """Merge newly downloaded data with the existing on-disk caches.

    Loads every readable cache file for `ticker`, coalesces overlapping or
    adjacent (<= 1 calendar-day gap) date segments, rewrites the cache
    directory as one CSV per merged segment, and returns the merged frame
    covering [new_start, new_end] — or `new_df` itself when no merged
    segment fully covers the request.

    NOTE(review): every pre-existing cache file is deleted below, including
    files that failed to load (corrupt / < 60 rows) and were therefore never
    merged — their data is silently discarded.  Confirm this is intended.
    """
    existing_ranges = get_existing_cache_ranges(ticker)
    all_dfs = [(new_df, new_start, new_end)]

    print(f"[DBG] 合并前现有缓存范围: {[(s, e) for s, e, _ in existing_ranges]}")
    # Collect the cache files that can be merged (unreadable ones are skipped).
    for cache_start, cache_end, csv_file in existing_ranges:
        cache_df, _, _ = load_cache_if_ok(ticker, cache_start, cache_end, csv_file) or (None, None, None)
        if cache_df is not None:
            all_dfs.append((cache_df, cache_start, cache_end))
            print(f"[DBG] 添加缓存 {cache_start} 至 {cache_end} 到合并列表")

    # Sort segments by start date; ISO "%Y-%m-%d" strings sort chronologically.
    all_dfs.sort(key=lambda x: x[1])
    print(f"[DBG] 排序后合并列表: {[(s, e) for _, s, e in all_dfs]}")

    # Sweep the sorted segments, coalescing runs of contiguous date ranges.
    merged_dfs = []
    current_df = None
    current_start = None
    current_end = None

    for df, start, end in all_dfs:
        if current_df is None:
            # First segment seeds the current run.
            current_df = df
            current_start = start
            current_end = end
            print(f"[DBG] 初始化合并段: {start} 至 {end}")
            continue

        curr_end_date = pd.to_datetime(current_end)
        start_date = pd.to_datetime(start)

        print(f"[DBG] 检查合并: 当前段 {current_start} 至 {current_end}, 新段 {start} 至 {end}")
        # Merge when the next segment starts no more than one day after the
        # current end.  NOTE(review): a 1-day tolerance does not bridge
        # weekends/holidays, so trading-day-adjacent segments separated by
        # more than one calendar day stay split — confirm acceptable.
        if start_date <= curr_end_date + timedelta(days=1):
            print(f"[DBG] 段 {start} 至 {end} 与当前段紧邻，合并")
            current_df = pd.concat([current_df, df]).sort_index()
            # keep="first" prefers rows from the earlier-starting segment.
            current_df = current_df[~current_df.index.duplicated(keep="first")]
            # max() on ISO date strings is a valid chronological maximum.
            current_end = max(current_end, end)
        else:
            print(f"[DBG] 段 {start} 至 {end} 与当前段不连续，保存当前段")
            merged_dfs.append((current_df, current_start, current_end))
            current_df = df
            current_start = start
            current_end = end

    if current_df is not None:
        merged_dfs.append((current_df, current_start, current_end))
        print(f"[DBG] 添加最后一个合并段: {current_start} 至 {current_end}")

    # Delete the old cache files (see the docstring NOTE about data loss).
    for _, _, csv_file in existing_ranges:
        csv_file.unlink(missing_ok=True)
        print(f"[DBG] 删除旧缓存文件: {csv_file}")

    # Persist one CSV per merged segment, named {ticker}_{start}_{end}.csv.
    for i, (df, start, end) in enumerate(merged_dfs):
        csv_file = CACHE_DIR / f"{ticker}_{start}_{end}.csv"
        df.to_csv(csv_file)
        print(f"[INF] 保存合并缓存: {csv_file}")

    # Return the merged segment that covers the requested window, if any.
    for df, start, end in merged_dfs:
        if pd.to_datetime(start) <= pd.to_datetime(new_start) and pd.to_datetime(end) >= pd.to_datetime(new_end):
            print(f"[DBG] 返回覆盖请求时间段的数据: {start} 至 {end}")
            return df
    print(f"[WARN] 未找到完全覆盖请求时间段的数据")
    return new_df

def download_data(ticker: str, start: str, end: str) -> pd.DataFrame:
    """Fetch daily data for the inclusive range [start, end], cache-aware.

    Strategy: (1) no cache at all -> download the whole range and cache it;
    (2) a single cache fully covering the range -> return its slice;
    (3) otherwise compute the uncovered sub-ranges, download only those,
    and merge them with the on-disk caches via merge_cache_files().

    Raises RuntimeError when nothing could be downloaded and no cache is
    usable.
    """
    start_date = pd.to_datetime(start).date()
    end_date = pd.to_datetime(end).date()

    print(f"[DBG] 请求下载 {ticker} {start} 至 {end}")
    # Inspect the caches currently on disk.
    existing_ranges = get_existing_cache_ranges(ticker)
    print(f"[DBG] 现有缓存范围: {[(s, e) for s, e, _ in existing_ranges]}")

    # First-ever download: fetch the full range and cache it in one file.
    if not existing_ranges:
        print(f"[INF] 第一次下载 {ticker} {start} 至 {end}")
        df = download_segment(ticker, start, end)
        if not df.empty:
            csv_file = CACHE_DIR / f"{ticker}_{start}_{end}.csv"
            df.to_csv(csv_file)
            print(f"[INF] 保存缓存: {csv_file}")
        return df

    # Look for a single cache that fully covers the requested range.
    for cache_start, cache_end, csv_file in existing_ranges:
        cache_result = load_cache_if_ok(ticker, start, end, csv_file)
        if cache_result and cache_result[0] is not None:
            df, c_start, c_end = cache_result[0], pd.to_datetime(cache_result[1]).date(), pd.to_datetime(cache_result[2]).date()
            # NOTE(review): this log line claims full coverage, but the actual
            # coverage check only happens on the next line — misleading when
            # the cache does not cover the range.
            print(f"[DBG] 缓存 {c_start} - {c_end} 完全覆盖请求范围 {start_date} - {end_date}")
            if c_start <= start_date and c_end >= end_date:
                # Slice the requested window; date objects slice a DatetimeIndex.
                df_subset = df.loc[start_date:end_date].copy()
                print(f"[INF] 使用缓存子集: {csv_file}, 范围 {start} 至 {end}")
                return df_subset

    # Work out which sub-ranges of [start_date, end_date] lack cache coverage.
    segments_to_download = []
    current_date = start_date
    existing_ranges = [(pd.to_datetime(s).date(), pd.to_datetime(e).date(), p) for s, e, p in existing_ranges]
    print(f"[DBG] 按时间排序的现有缓存范围: {existing_ranges}")

    while current_date <= end_date:
        segment_start = current_date
        segment_end = end_date
        has_overlap = False

        print(f"[DBG] 当前检查时间: {current_date}")
        for cache_start, cache_end, _ in existing_ranges:
            # NOTE(review): half-open comparisons — a segment starting exactly
            # at cache_end is not treated as overlapping; and only the FIRST
            # overlapping cache is considered per pass (the break below), so a
            # trimmed segment may still overlap a later cache.  Duplicates are
            # removed later in merge_cache_files, but bandwidth may be wasted
            # on re-downloads — confirm acceptable.
            if cache_start <= segment_start < cache_end or cache_start < segment_end <= cache_end:
                print(f"[DBG] 找到重叠缓存: {cache_start} 至 {cache_end}")
                if cache_start <= segment_start and segment_end <= cache_end:
                    print(f"[DBG] 缓存完全覆盖当前段，跳出")
                    current_date = cache_end + timedelta(days=1)
                    has_overlap = True
                    break
                elif cache_start <= segment_start:
                    # Cache covers the head of the segment; restart after it.
                    segment_start = cache_end + timedelta(days=1)
                    print(f"[DBG] 更新段开始时间至 {segment_start}")
                else:
                    # Cache covers the tail; stop just before it begins.
                    segment_end = cache_start - timedelta(days=1)
                    print(f"[DBG] 调整段结束时间至 {segment_end}")
                break

        if has_overlap:
            # Current position was fully covered; re-scan from the new date.
            continue

        if segment_start <= segment_end:
            print(f"[DBG] 确定下载段: {segment_start.strftime('%Y-%m-%d')} 至 {segment_end.strftime('%Y-%m-%d')}")
            segments_to_download.append((segment_start.strftime("%Y-%m-%d"), segment_end.strftime("%Y-%m-%d")))
            current_date = segment_end + timedelta(days=1)
        else:
            current_date = end_date + timedelta(days=1)  # force loop exit

    print(f"[DBG] 需下载的段: {segments_to_download}")
    # Download each missing sub-range.
    downloaded_dfs = []
    for seg_start, seg_end in segments_to_download:
        print(f"[INF] 下载 {ticker} {seg_start} 至 {seg_end}")
        df = download_segment(ticker, seg_start, seg_end)
        if not df.empty:
            downloaded_dfs.append((df, seg_start, seg_end))

    # Stitch the downloads together and merge them with the caches on disk.
    if downloaded_dfs:
        new_df = pd.concat([df for df, _, _ in downloaded_dfs]).sort_index()
        new_df = new_df[~new_df.index.duplicated(keep="first")]
        new_start = min(seg_start for _, seg_start, _ in downloaded_dfs)
        new_end = max(seg_end for _, _, seg_end in downloaded_dfs)
        print(f"[INF] 合并 {ticker} {new_start} 至 {new_end}")
        return merge_cache_files(ticker, new_df, new_start, new_end)

    # Nothing new to download: fall back to the first readable cache.
    # NOTE(review): this returns the first cache that loads, which may not
    # cover the requested range — confirm callers tolerate partial data.
    for cache_start, cache_end, csv_file in existing_ranges:
        cache_result = load_cache_if_ok(ticker, start, end, csv_file)
        if cache_result:
            print(f"[INF] 无需下载，使用现有缓存 {cache_start} 至 {cache_end}")
            return cache_result[0]

    raise RuntimeError(f"无法获取 {ticker} 数据或数据不足")

def _ensure_close(df: pd.DataFrame) -> pd.DataFrame:
    if "Close" not in df.columns:
        if "Adj Close" in df.columns:
            df = df.rename(columns={"Adj Close": "Close"})
        elif "Price" in df.columns:
            df = df.rename(columns={"Price": "Close"})
        else:
            raise ValueError("缺少 Close 列")
    return df


def flatten_columns(df: pd.DataFrame, ticker: str) -> pd.DataFrame:
    if not isinstance(df.columns, pd.MultiIndex):
        return _ensure_close(df)
    lvl0, lvl1 = df.columns.levels
    if ticker in lvl0:
        df = df[ticker].copy()
    elif ticker in lvl1: 
        df = df.xs(ticker, axis=1, level=1).copy()
    else:
        raise ValueError("未找到匹配列")
    return _ensure_close(df)

