import argparse

from datetime import datetime

import torch
import numpy as np
import os
import time
from tqdm import tqdm
from typing import Dict, Tuple

# Tianshou-related imports
from tianshou.data import Collector, VectorReplayBuffer, Batch
from tianshou.policy import TD3Policy
from tianshou.trainer import OffpolicyTrainer
from tianshou.env import DummyVectorEnv, SubprocVectorEnv
from tianshou.utils.net.common import Net
from tianshou.utils.net.continuous import Actor, Critic
from tianshou.exploration import GaussianNoise
from torch.utils.tensorboard import SummaryWriter
import tianshou as ts

from env.wireless_power_env_vectorized import WirelessPowerEnv


def get_args(argv=None):
    """Parse command-line arguments for the training script.

    Args:
        argv: optional list of argument strings to parse; defaults to
            ``sys.argv[1:]`` when ``None`` (the extra parameter keeps the
            CLI behavior unchanged while making the parser unit-testable).

    Returns:
        argparse.Namespace with ``resume_path`` (str or None): path of a
        saved model to resume training from.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--resume-path', type=str, default=None,
        help='The path of the saved model to resume training from.',
    )
    return parser.parse_args(argv)



def run_training(args):
    """Build a TD3 agent on WirelessPowerEnv, warm up the replay buffer,
    and run off-policy training with TensorBoard logging.

    Args:
        args: parsed CLI namespace; ``args.resume_path`` may point to a saved
            policy ``state_dict`` to resume from (ignored when None/missing).
    """
    # 1. Timestamped TensorBoard log directory so repeated runs never collide.
    timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    log_path = os.path.join('logs', f'td3_experiment_{timestamp}')
    writer = SummaryWriter(log_path)
    tf_logger = ts.utils.TensorboardLogger(writer, train_interval=50)

    # 2. Environment setup.
    env_fn = lambda: WirelessPowerEnv()
    # Worker count is deliberately pinned to 1. (The original code probed
    # os.cpu_count() here but immediately overwrote the result — dead code,
    # removed.) Raise this value to parallelize data collection across cores.
    num_workers = 1
    print(f"使用 {num_workers} 个核心...")
    train_worker_count = max(1, num_workers)
    test_worker_count = max(1, num_workers // 4)
    train_envs = SubprocVectorEnv([env_fn for _ in range(train_worker_count)])
    test_envs = SubprocVectorEnv([env_fn for _ in range(test_worker_count)])

    # Probe env, used only to read observation/action space shapes and bounds.
    env = env_fn()
    try:
        # 3. Network and policy setup.
        state_shape = env.observation_space.shape
        action_shape = env.action_space.shape
        max_action = env.action_space.high[0]
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

        # Actor network.
        actor_net = Net(state_shape, hidden_sizes=[256, 256], device=device)
        actor = Actor(actor_net, action_shape, max_action=max_action, device=device).to(device)
        actor_optim = torch.optim.Adam(actor.parameters(), lr=5e-5)

        # TD3 uses twin critics, each with its own optimizer.
        critic1_net = Net(state_shape, action_shape, hidden_sizes=[512, 512], concat=True, device=device)
        critic1 = Critic(critic1_net, device=device).to(device)
        critic1_optim = torch.optim.Adam(critic1.parameters(), lr=5e-5)

        critic2_net = Net(state_shape, action_shape, hidden_sizes=[512, 512], concat=True, device=device)
        critic2 = Critic(critic2_net, device=device).to(device)
        critic2_optim = torch.optim.Adam(critic2.parameters(), lr=5e-5)

        # Gaussian noise added to actions during exploration.
        exploration_noise = GaussianNoise(sigma=0.10)

        policy = TD3Policy(
            actor=actor,
            actor_optim=actor_optim,
            critic1=critic1,
            critic1_optim=critic1_optim,
            critic2=critic2,
            critic2_optim=critic2_optim,
            tau=1e-3,
            gamma=0.99,
            exploration_noise=exploration_noise,
            action_space=env.action_space,
        )

        # Optionally resume from a checkpoint; map_location makes the load
        # work whether training resumes on CPU or GPU.
        if args.resume_path and os.path.exists(args.resume_path):
            print(f"加载已保存的模型: {args.resume_path}")
            state_dict = torch.load(args.resume_path, map_location=device)
            policy.load_state_dict(state_dict)
            print("模型加载成功！")
        else:
            print("未找到已保存的模型，将从头开始训练。")

        # 4. Data collectors.
        # NOTE(review): ``info={}`` is swallowed by the buffer's **kwargs and
        # has no documented effect — confirm against the installed tianshou
        # version whether it can simply be dropped.
        buffer = VectorReplayBuffer(total_size=12800, buffer_num=len(train_envs), info={})
        train_collector = Collector(policy, train_envs, buffer, exploration_noise=True)
        test_collector = Collector(policy, test_envs)

        # 5. Warm-up: fill the replay buffer with random transitions before
        # any gradient updates happen.
        warmup_steps = 2000
        print(f"\n--- 开始预热阶段，收集 {warmup_steps} 条随机数据 ---")
        start_time = time.time()
        with tqdm(total=warmup_steps, desc="Warmup Data Collection") as pbar:
            collected_steps = 0
            while collected_steps < warmup_steps:
                result = train_collector.collect(n_step=len(train_envs) * 4, random=True)
                # NOTE(review): dict-style access matches tianshou 0.x; in
                # tianshou >= 1.0 collect() returns a CollectStats object
                # (use result.n_collected_steps) — confirm installed version.
                steps_this_iter = result['n/st']
                collected_steps += steps_this_iter
                pbar.update(steps_this_iter)
        end_time = time.time()
        print(f"--- 预热完成，耗时: {end_time - start_time:.2f} 秒 ---\n")

        # 6. Trainer: alternates collection and updates; saves the best policy
        # (by test reward) to a timestamped checkpoint file.
        trainer = OffpolicyTrainer(
            policy=policy, train_collector=train_collector, test_collector=test_collector,
            max_epoch=500, step_per_epoch=1024, step_per_collect=len(train_envs),
            episode_per_test=100,
            batch_size=1024, update_per_step=0.1,
            stop_fn=lambda mean_rewards: mean_rewards >= 10,
            save_best_fn=lambda policy: torch.save(policy.state_dict(), f'best_td3_agent_{timestamp}.pth'),
            logger=tf_logger,
        )
        result = trainer.run()
        # 7. Done.
        print(f"训练完成: {result}")
    finally:
        # Always release the subprocess workers, the probe env, and the
        # TensorBoard writer — even when training aborts with an exception
        # (the original leaked all of them).
        env.close()
        train_envs.close()
        test_envs.close()
        writer.close()


if __name__ == '__main__':
    # Entry point: parse CLI options, then hand them to the trainer.
    cli_args = get_args()
    run_training(cli_args)
