import os
import sys
import datetime as dt
from pathlib import Path

import pandas as pd
import numpy as np

# Ensure qlib is importable from this repo/venv
import qlib
from qlib.tests.data import GetData
from qlib.data import D

# Use PyTorch for deep learning time-series model
import torch
import torch.nn as nn
import torch.optim as optim
import contextlib
# Optional CUDA AMP (mixed precision) support: CPU-only or very old torch
# builds may fail to import torch.cuda.amp, so provide inert stand-ins that
# keep the scaler/autocast call sites in train_lstm() working unchanged.
try:
    from torch.cuda.amp import autocast, GradScaler
    _HAS_CUDA_AMP = True
except Exception:
    # nullcontext stands in for autocast; the AMP branch is guarded by
    # _HAS_CUDA_AMP, so this stand-in is never actually entered with AMP args.
    autocast = contextlib.nullcontext  # type: ignore
    class _GradScalerStub:  # type: ignore
        # No-op drop-in replacement for torch.cuda.amp.GradScaler.
        def __init__(self, enabled=False):
            pass
        def scale(self, loss):
            # Pass the loss through unscaled.
            return loss
        def step(self, opt):
            # Run the optimizer step directly (no inf/NaN gradient check).
            opt.step()
        def update(self):
            pass
    GradScaler = _GradScalerStub  # type: ignore
    _HAS_CUDA_AMP = False

# Optional DirectML backend for Windows GPU acceleration.
# torch-directml is a separate, Windows-only package; when it is absent the
# script simply never offers a DML device (see select_device()).
try:
    import torch_directml
    _HAS_DML = True
except Exception:
    torch_directml = None  # type: ignore
    _HAS_DML = False


def ensure_data(provider_uri: str = "~/.qlib/qlib_data/cn_data"):
    """Download the CN daily dataset if missing, initialize qlib, and return the data dir."""
    resolved = str(Path(provider_uri).expanduser())
    # Fetch the latest 1d CN bundle; exists_skip avoids re-downloading.
    downloader = GetData(delete_zip_file=True)
    downloader.qlib_data(target_dir=resolved, interval="1d", region="cn", exists_skip=True)
    # Point qlib at the local bundle; both caches are disabled for simplicity.
    qlib.init(provider_uri=resolved, region="cn", expression_cache=None, dataset_cache=None)
    return resolved


def load_saic_close(start: str, end: str) -> pd.DataFrame:
    """Load adjusted close and factor for SAIC Motor and derive the raw close.

    Returns a DataFrame with columns close, factor, raw_close, NaNs dropped,
    sorted by the (instrument, datetime) index.
    """
    # SAIC Motor trades on Shanghai as SH600104 in the qlib CN bundle.
    frame = D.features(["SH600104"], ["$close", "$factor"], start_time=start, end_time=end, freq="day")
    frame.columns = ["close", "factor"]
    # $close is factor-adjusted; dividing by $factor recovers the quoted price.
    frame["raw_close"] = frame["close"] / frame["factor"]
    return frame.dropna().sort_index()


# ---------------------
# Deep Learning Forecaster (LSTM)
# ---------------------
class SequenceDataset(torch.utils.data.Dataset):
    """Sliding-window dataset: each item is (previous `lookback` values, next value)."""

    def __init__(self, series: pd.Series, lookback: int):
        # Keep the whole series as one float32 tensor; windows are cheap slices.
        self.series = torch.tensor(series.values, dtype=torch.float32)
        self.lookback = lookback

    def __len__(self):
        # One sample per position that still has a target after its window.
        return len(self.series) - self.lookback

    def __getitem__(self, idx):
        end = idx + self.lookback
        window = self.series[idx:end].unsqueeze(-1)  # [lookback, 1]
        target = self.series[end]  # scalar next-step value
        return window, target


class LSTMRegressor(nn.Module):
    """Stacked LSTM that regresses the next scalar value from an input window."""

    def __init__(self, input_size=1, hidden_size=64, num_layers=2, dropout=0.1):
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, dropout=dropout)
        self.fc = nn.Linear(hidden_size, 1)

    def forward(self, x):
        # x: [batch, seq, features]; only the last time step feeds the head.
        seq_out, _ = self.lstm(x)
        last_hidden = seq_out[:, -1, :]
        return self.fc(last_hidden).squeeze(-1)


def select_device(prefer: str = "auto"):
    """Pick a torch device: honor an explicit preference, else CUDA > DML > CPU."""
    choice = (prefer or "auto").lower()
    if choice == "cuda" and torch.cuda.is_available():
        return torch.device("cuda")
    if choice == "dml" and _HAS_DML:
        return torch_directml.device()
    if choice == "cpu":
        return torch.device("cpu")
    # Auto-detection (also reached when an explicit choice is unusable).
    if torch.cuda.is_available():
        try:
            # Tiny allocation as a smoke test; filters obviously broken CUDA
            # setups, though not every possible kernel-level failure.
            torch.zeros(1, device="cuda")
        except Exception:
            pass
        else:
            return torch.device("cuda")
    return torch_directml.device() if _HAS_DML else torch.device("cpu")


def train_lstm(series: pd.Series, lookback=30, epochs=120, lr=1e-3, batch_size=512, device=None):
    """Train an LSTMRegressor on a 1-D series (typically log prices).

    Args:
        series: values to learn next-step prediction on, indexed by date.
        lookback: window length fed to the LSTM for each sample.
        epochs: number of full passes over the dataset.
        lr: Adam learning rate.
        batch_size: minibatch size; the incomplete trailing batch is dropped.
        device: explicit torch device; when None it is auto-selected
            (the PREDICT_DEVICE env var can force auto/cuda/dml/cpu).

    Returns:
        (model, lookback, device) so callers can run autoregressive inference.
    """
    if device is None:
        device = select_device(os.environ.get("PREDICT_DEVICE", "auto"))
    print(f"Training device: {device}")
    use_cuda = (isinstance(device, torch.device) and device.type == "cuda") or (isinstance(device, str) and device == "cuda")
    if use_cuda:
        try:
            # Batches are fixed-size (drop_last=True), so cuDNN autotuning pays off.
            torch.backends.cudnn.benchmark = True
        except Exception:
            pass
    ds = SequenceDataset(series, lookback)
    dl = torch.utils.data.DataLoader(
        ds,
        batch_size=batch_size,
        shuffle=True,
        drop_last=True,
        num_workers=0,
        pin_memory=use_cuda,
    )
    model = LSTMRegressor().to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.MSELoss()
    scaler = GradScaler(enabled=(use_cuda and _HAS_CUDA_AMP))

    model.train()
    for ep in range(epochs):
        epoch_loss = 0.0
        for x, y in dl:
            x = x.to(device, non_blocking=use_cuda)
            y = y.to(device, non_blocking=use_cuda)
            optimizer.zero_grad(set_to_none=True)
            if use_cuda and _HAS_CUDA_AMP:
                # BUG FIX: `autocast` here is torch.cuda.amp.autocast, whose
                # signature is (enabled, dtype, cache_enabled) — it does NOT
                # accept device_type (that keyword belongs to torch.amp.autocast),
                # so the original call raised TypeError on the CUDA+AMP path.
                with autocast(dtype=torch.float16):
                    pred = model(x)
                    loss = loss_fn(pred, y)
                scaler.scale(loss).backward()
                scaler.step(optimizer)
                scaler.update()
            else:
                pred = model(x)
                loss = loss_fn(pred, y)
                loss.backward()
                optimizer.step()
            epoch_loss += loss.item()
        if ep % 20 == 0:
            print(f"Epoch {ep}: loss={epoch_loss/len(dl):.6f}")
    return model, lookback, device


def deep_forecast_price(series: pd.Series, forecast_dates: pd.DatetimeIndex, lookback=30, epochs=120, device: "auto|cuda|dml|cpu" = "auto") -> pd.DataFrame:
    """Fit an LSTM on log prices and roll it forward over forecast_dates.

    Returns a DataFrame indexed by datetime with one column, forecast_raw_close.
    """
    # Train in log space for numerical stability; invert with exp at the end.
    log_series = pd.Series(np.log(series.values), index=series.index)
    sel_dev = select_device(device) if isinstance(device, str) else (device or select_device("auto"))
    model, lb, device = train_lstm(log_series, lookback=lookback, epochs=epochs, device=sel_dev)

    from collections import deque

    # Autoregressive rollout: each prediction joins the window that produces
    # the next one, so errors compound over the horizon.
    window = deque(log_series.values[-lb:].tolist(), maxlen=lb)
    model.eval()
    preds_log = []
    with torch.no_grad():
        for _ in forecast_dates:
            features = torch.tensor(list(window), dtype=torch.float32)
            features = features.unsqueeze(0).unsqueeze(-1).to(device)
            step_pred = model(features).item()
            preds_log.append(step_pred)
            window.append(step_pred)

    forecast = np.exp(np.asarray(preds_log))
    result = pd.DataFrame({"datetime": forecast_dates, "forecast_raw_close": forecast})
    return result.set_index("datetime")


def get_oct_late_trading_days(end_year: int = 2025) -> pd.DatetimeIndex:
    """Return the late-October (20th onward) trading days for end_year."""
    start, end = f"{end_year}-10-01", f"{end_year}-10-31"
    try:
        # Prefer qlib's exchange calendar, which includes future sessions.
        days = pd.DatetimeIndex(D.calendar(start_time=start, end_time=end, freq="day", future=True))
    except Exception:
        # No calendar available: approximate with Mon-Fri business days.
        days = pd.date_range(start, end, freq="B")
    # Late October means the 20th and onward.
    return days[days >= pd.Timestamp(f"{end_year}-10-20")]


def main():
    """Download data, fit the LSTM, and write the late-Oct 2025 SAIC forecast CSV."""
    # BUG FIX: parse CLI options FIRST. The original called parse_args() in the
    # middle of the function, after ensure_data(), so `--help` or an invalid
    # flag triggered a full dataset download before argparse could exit.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--device", choices=["auto", "cuda", "dml", "cpu"], default="auto")
    args = parser.parse_args()

    provider_uri = ensure_data()
    # History must stop just before the forecast window begins.
    history_end = "2025-10-19"
    history_start = "2008-01-01"
    df = load_saic_close(history_start, history_end)
    if df.empty:
        print("No historical data loaded for SH600104. Please check data initialization.")
        sys.exit(2)

    # Forecast dates: late-Oct 2025 trading days strictly after the last
    # available history date.
    forecast_dates = get_oct_late_trading_days(end_year=2025)
    forecast_dates = forecast_dates[forecast_dates > df.index.get_level_values("datetime").max()]
    if len(forecast_dates) == 0:
        # Future calendar unavailable: forecast the next 8 business days.
        last_date = df.index.get_level_values("datetime").max()
        forecast_dates = pd.bdate_range(last_date + pd.Timedelta(days=1), periods=8)

    # Fit the deep model on the unadjusted close and roll it forward.
    series = df["raw_close"].dropna()
    fc_df = deep_forecast_price(series=series, forecast_dates=forecast_dates, lookback=30, epochs=180, device=args.device)

    # Persist the forecast next to the other example reports.
    out_dir = Path("examples/reports").resolve()
    out_dir.mkdir(parents=True, exist_ok=True)
    out_path = out_dir.joinpath("saic_price_forecast_2025_10_late.csv")
    fc_df.to_csv(out_path, float_format="%.4f")

    # Human-readable summary.
    print("Provider URI:", provider_uri)
    print("Last history date:", df.index.get_level_values("datetime").max())
    print("Forecast dates:")
    print(forecast_dates)
    print("Forecast (raw close, CNY):")
    print(fc_df)
    print("Saved to:", out_path)


# Script entry point: run the end-to-end forecast when executed directly.
if __name__ == "__main__":
    main()