import torch
import pygame
import numpy as np
import time
from thop import profile

# Training function
def train(model, environment, buffer, optimizer, episodes=1000, batch_size=32, gamma=0.9):
    """Train a policy network with REINFORCE-style Monte-Carlo returns.

    Runs ``episodes`` rollouts in ``environment``, stores
    (state, action, discounted_return) tuples in ``buffer``, and performs one
    policy-gradient update per episode once the buffer holds at least
    ``batch_size`` samples.

    Args:
        model: Policy network mapping a (1, state_dim) float tensor to a
            row of action probabilities. (assumed from the calls below —
            TODO confirm output is probabilities, not logits)
        environment: Object exposing ``reset() -> state``,
            ``step(action) -> (next_state, reward, done)``, and ``render()``.
        buffer: Replay buffer exposing ``push(item)``, ``sample(n)``, and a
            ``.buffer`` container whose ``len`` is the current size.
        optimizer: torch optimizer constructed over ``model.parameters()``.
        episodes: Number of rollout episodes.
        batch_size: Minimum buffer size and sample size for an update.
        gamma: Discount factor applied when computing returns.
    """
    # Profile FLOPs/params only once: thop.profile registers hooks and adds
    # buffers to the model on every call, so running it per step both slows
    # the loop drastically and pollutes the model.
    profiled = False

    for episode in range(episodes):
        state = environment.reset()
        episode_experiences = []
        total_reward = 0
        done = False

        while not done:
            state_tensor = torch.tensor(state, dtype=torch.float32).unsqueeze(0)

            start_time = time.time()
            # Rollout inference needs no autograd graph; gradients are
            # recomputed on the sampled batch during the update below.
            with torch.no_grad():
                action_probs = model(state_tensor)
            print(f"inference time: {time.time() - start_time}")

            if not profiled:
                flops, params = profile(model, inputs=(state_tensor,))
                print(f"FLOPs: {flops}")
                print(f"Parameters: {params}")
                profiled = True

            if torch.isnan(action_probs).any():
                # BUG FIX: the old `continue` retried with an unchanged state
                # and a deterministic model, spinning forever on NaN. Abort
                # the episode instead so training can proceed.
                print("NaN in action probabilities; aborting episode")
                break
            action = torch.multinomial(action_probs, 1).item()
            next_state, reward, done = environment.step(action)

            episode_experiences.append((state, action, reward))
            total_reward += reward
            state = next_state

            environment.render()
            pygame.time.delay(50)

        # Compute discounted returns back-to-front: R_t = r_t + gamma * R_{t+1}.
        R = 0
        returns = []
        for _, _, reward in reversed(episode_experiences):
            R = reward + gamma * R
            returns.insert(0, R)

        for (state, action, _), R in zip(episode_experiences, returns):
            buffer.push((state, action, R))

        if len(buffer.buffer) >= batch_size:
            batch = buffer.sample(batch_size)
            states, actions, returns = zip(*batch)

            states = torch.tensor(np.array(states), dtype=torch.float32)
            actions = torch.tensor(actions, dtype=torch.long)
            returns = torch.tensor(returns, dtype=torch.float32)

            action_probs = model(states)
            # BUG FIX: gather yields shape (B, 1) while `returns` is (B,);
            # without squeeze the product broadcasts to a (B, B) matrix and
            # the mean averages B*B wrong terms. Squeeze to elementwise (B,).
            selected_probs = action_probs.gather(1, actions.unsqueeze(1)).squeeze(1)
            # Clamp so an underflowed probability cannot produce log(0) = -inf.
            log_probs = torch.log(selected_probs.clamp_min(1e-8))
            loss = -torch.mean(log_probs * returns)

            optimizer.zero_grad()
            loss.backward()
            # Gradient clipping guards against exploding policy gradients.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()

        print(f"Episode {episode}, Total Reward: {total_reward}")
