import argparse

import sys
import torch
import qlib
from qlib.contrib.data.handler import Alpha158
from qlib.data.dataset import TSDatasetH
from qlib.contrib.model.pytorch_alstm_ts import ALSTM
from qlib.constant import REG_CN

from finetune.config import Config

import os

from finetune.github_qlib_dataset import GetData

# Route Hugging Face Hub downloads through a mirror endpoint (useful where
# huggingface.co is slow or unreachable). Must be set before any
# `from_pretrained` call triggers a download.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# Make the repository root importable so the top-level `model` package
# resolves when this script is executed from its subdirectory.
# NOTE(review): this assumes the script is run with the CWD inside the repo;
# a path relative to __file__ would be more robust — confirm how it's invoked.
sys.path.append("../")
from model import Kronos, KronosTokenizer, KronosPredictor


def download_data():
    """Download the Qlib CN daily data bundle and initialize qlib against it.

    Side effects:
        Writes the cn_data bundle under ``~/.qlib/qlib_data/cn_data`` (network
        download) and initializes the global qlib provider to that directory.
    """
    # Expand "~" explicitly: the downloader writes to the filesystem and may
    # not expand user paths itself, so pass a concrete absolute path.
    data_uri = os.path.expanduser("~/.qlib/qlib_data/cn_data")

    # Download first, then init, so qlib.init sees a populated provider
    # directory instead of warning about (or lazily failing on) missing data.
    get_data = GetData()
    get_data.qlib_data(target_dir=data_uri, region=REG_CN)
    qlib.init(provider_uri=data_uri, region=REG_CN)


def dump_model(run_config):
    """Fetch the pretrained Kronos tokenizer and predictor from the Hub and
    write local copies to the paths configured for this run.

    Args:
        run_config: dict providing 'tokenizer_path' and 'model_path' entries
            (destination directories for ``save_pretrained``).
    """
    # Pull both pretrained artifacts from the Hub (honors HF_ENDPOINT).
    hub_tokenizer = KronosTokenizer.from_pretrained("NeoQuasar/Kronos-Tokenizer-base")
    hub_model = Kronos.from_pretrained("NeoQuasar/Kronos-base")

    # Persist them locally so later fine-tuning/inference can load from disk.
    hub_tokenizer.save_pretrained(run_config["tokenizer_path"])
    hub_model.save_pretrained(run_config["model_path"])


def main():
    """Main function to set up config, run inference, and execute backtesting."""
    # --- CLI ---
    parser = argparse.ArgumentParser(description="Run Kronos Inference and Backtesting")
    parser.add_argument("--device", type=str, default="cuda:0", help="Device for inference (e.g., 'cuda:0', 'cpu')")
    args = parser.parse_args()

    # --- Configuration setup ---
    base_config = Config()

    # Flatten everything this run needs into one dict, keyed by short names.
    run_config = dict(
        device=args.device,
        data_path=base_config.dataset_path,
        result_save_path=base_config.backtest_result_path,
        result_name=base_config.backtest_save_folder_name,
        tokenizer_path=base_config.finetuned_tokenizer_path,
        model_path=base_config.finetuned_predictor_path,
        max_context=base_config.max_context,
        pred_len=base_config.predict_window,
        clip=base_config.clip,
        T=base_config.inference_T,
        top_k=base_config.inference_top_k,
        top_p=base_config.inference_top_p,
        sample_count=base_config.inference_sample_count,
        batch_size=base_config.backtest_batch_size,
    )

    # Echo the effective configuration, right-aligned for readability.
    print("--- Running with Configuration ---")
    for name in run_config:
        print(f"{name:>20}: {run_config[name]}")
    print("-" * 35)

    download_data()
    # NOTE: model dumping is an optional step; uncomment to refresh local copies.
    # dump_model(run_config)


# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
