import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from collections import deque
import random


class DQN(nn.Module):
    """Convolutional Q-network over a channels-last lawn grid plus scalar features.

    The lawn observation is expected in ``[height, width, channels]`` order
    (channels-last); it is permuted to ``[channels, height, width]`` before the
    convolutional stack. The conv features are concatenated with a flat vector
    of extra features (sun count + per-plant remaining counts) and passed
    through a fully connected head that outputs one Q-value per action.

    Args:
        input_shape: Observation shape ``(height, width, channels)``.
        n_actions: Number of discrete actions (size of the output layer).
        extra_features: Width of the concatenated extra-feature vector.
            Defaults to 11 (1 for sun + 10 for plants_remaining), matching
            the original hard-coded layout.
    """

    def __init__(self, input_shape, n_actions, extra_features=11):
        super(DQN, self).__init__()

        self.conv = nn.Sequential(
            nn.Conv2d(input_shape[2], 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            # Adaptive pooling makes the conv output size independent of the
            # spatial dimensions of the input lawn.
            nn.AdaptiveAvgPool2d((1, 1))
        )

        conv_out_size = self._get_conv_out(input_shape)

        self.fc = nn.Sequential(
            nn.Linear(conv_out_size + extra_features, 512),
            nn.ReLU(),
            nn.Linear(512, n_actions)
        )

    def _get_conv_out(self, shape):
        """Return the flattened size of the conv stack's output for `shape`.

        Runs a zero tensor of the observation shape through the conv layers
        (after the channels-last -> channels-first permute) and counts the
        resulting elements.
        """
        o = torch.zeros(1, *shape)
        o = o.permute(0, 3, 1, 2)
        o = self.conv(o)
        # Batch size is 1, so the element count equals the flattened feature size.
        return o.numel()

    def forward(self, lawn, sun, plants_remaining):
        """Compute Q-values.

        Args:
            lawn: Float tensor ``[batch, height, width, channels]``.
            sun: Float tensor ``[batch, 1]``.
            plants_remaining: Float tensor ``[batch, extra_features - 1]``.

        Returns:
            Float tensor ``[batch, n_actions]`` of Q-values.
        """
        # [batch, height, width, channels] -> [batch, channels, height, width]
        lawn = lawn.permute(0, 3, 1, 2)
        conv_out = self.conv(lawn).view(lawn.size()[0], -1)
        extra_input = torch.cat([sun, plants_remaining], dim=1)
        return self.fc(torch.cat([conv_out, extra_input], dim=1))


class ReplayBuffer:
    """Fixed-capacity FIFO store of environment transitions.

    Once `capacity` transitions are held, each new push evicts the oldest.
    """

    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        """Append one (state, action, reward, next_state, done) transition."""
        transition = (state, action, reward, next_state, done)
        self.buffer.append(transition)

    def sample(self, batch_size):
        """Return `batch_size` transitions drawn uniformly without replacement."""
        return random.sample(self.buffer, batch_size)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.buffer)


class DQNAgent:
    """DQN agent with an epsilon-greedy policy, replay buffer, and target network.

    Action indices are flattened over (row, col, plant_type); the environment
    is assumed to expose `num_rows`, `num_cols`, `plants`, `action_space`, and
    a dict observation space with a 'lawn' entry — TODO confirm against the
    env implementation.
    """

    def __init__(self, env, learning_rate=1e-4, gamma=0.99,
                 epsilon_start=1.0, epsilon_final=0.05, epsilon_decay=0.9995,
                 memory_size=50000, batch_size=64, target_update=2000):
        self.env = env
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        lawn_shape = env.observation_space['lawn'].shape
        # One discrete action per (row, col, plant_type) triple.
        n_actions = env.num_rows * env.num_cols * len(env.plants)

        self.policy_net = DQN(lawn_shape, n_actions).to(self.device)
        self.target_net = DQN(lawn_shape, n_actions).to(self.device)
        # Start the target network as an exact copy of the policy network.
        self.target_net.load_state_dict(self.policy_net.state_dict())

        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=learning_rate)
        self.memory = ReplayBuffer(memory_size)

        self.batch_size = batch_size
        self.gamma = gamma
        self.epsilon = epsilon_start
        self.epsilon_final = epsilon_final
        self.epsilon_decay = epsilon_decay
        self.target_update = target_update  # training steps between target syncs
        self.steps_done = 0

    def select_action(self, state):
        """Return an action for `state` using an epsilon-greedy policy.

        With probability epsilon a random action is sampled from the env's
        action space; otherwise the policy network's argmax Q-value index is
        decoded back into a [row, col, plant_type] triple.
        """
        if random.random() > self.epsilon:
            with torch.no_grad():
                lawn = torch.FloatTensor(state['lawn']).unsqueeze(0).to(self.device)
                sun = torch.FloatTensor(state['sun']).unsqueeze(0).to(self.device)
                plants = torch.FloatTensor(state['plants_remaining']).unsqueeze(0).to(self.device)

                q_values = self.policy_net(lawn, sun, plants)
                action_idx = q_values.max(1)[1].item()

                # Inverse of the flattening used in train_step:
                # idx = row * (num_cols * n_plants) + col * n_plants + plant_type
                row = action_idx // (self.env.num_cols * len(self.env.plants))
                col = (action_idx % (self.env.num_cols * len(self.env.plants))) // len(self.env.plants)
                plant_type = action_idx % len(self.env.plants)

                return [row, col, plant_type]
        else:
            return self.env.action_space.sample()

    def train_step(self):
        """Run one gradient step on a sampled minibatch.

        Returns the scalar loss, or None when the replay buffer does not yet
        hold a full batch. Also decays epsilon and periodically syncs the
        target network.
        """
        if len(self.memory) < self.batch_size:
            return

        transitions = self.memory.sample(self.batch_size)
        # Transpose list-of-transitions into per-field tuples.
        batch = list(zip(*transitions))

        state_batch = {
            'lawn': torch.FloatTensor(np.stack([s['lawn'] for s in batch[0]])).to(self.device),
            'sun': torch.FloatTensor(np.stack([s['sun'] for s in batch[0]])).to(self.device),
            'plants_remaining': torch.FloatTensor(np.stack([s['plants_remaining'] for s in batch[0]])).to(self.device)
        }

        # Flatten [row, col, plant_type] actions into a single index
        # (mirrors the decoding in select_action).
        action_batch = torch.LongTensor([
            a[0] * self.env.num_cols * len(self.env.plants) +
            a[1] * len(self.env.plants) +
            a[2] for a in batch[1]
        ]).to(self.device)

        reward_batch = torch.FloatTensor(batch[2]).to(self.device)

        next_state_batch = {
            'lawn': torch.FloatTensor(np.stack([s['lawn'] for s in batch[3]])).to(self.device),
            'sun': torch.FloatTensor(np.stack([s['sun'] for s in batch[3]])).to(self.device),
            'plants_remaining': torch.FloatTensor(np.stack([s['plants_remaining'] for s in batch[3]])).to(self.device)
        }

        done_batch = torch.FloatTensor(batch[4]).to(self.device)

        # Q(s, a) for the actions actually taken.
        state_action_values = self.policy_net(
            state_batch['lawn'],
            state_batch['sun'],
            state_batch['plants_remaining']
        ).gather(1, action_batch.unsqueeze(1))

        # max_a' Q_target(s', a'); detached so no gradients flow to the target net.
        next_state_values = self.target_net(
            next_state_batch['lawn'],
            next_state_batch['sun'],
            next_state_batch['plants_remaining']
        ).max(1)[0].detach()

        # Standard one-step TD target; (1 - done) zeroes the bootstrap on
        # terminal transitions.
        expected_state_action_values = reward_batch + (1 - done_batch) * self.gamma * next_state_values

        loss = nn.MSELoss()(state_action_values, expected_state_action_values.unsqueeze(1))

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        self.epsilon = max(self.epsilon_final, self.epsilon * self.epsilon_decay)

        # Note: checked before incrementing, so the first sync happens on the
        # very first training step (steps_done == 0).
        if self.steps_done % self.target_update == 0:
            self.target_net.load_state_dict(self.policy_net.state_dict())

        self.steps_done += 1

        return loss.item()

    def save_model(self, path):
        """Save networks, optimizer state, epsilon and step counter to `path`."""
        torch.save({
            'policy_net_state_dict': self.policy_net.state_dict(),
            'target_net_state_dict': self.target_net.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'epsilon': self.epsilon,
            'steps_done': self.steps_done
        }, path)

    def load_model(self, path):
        """Load a checkpoint saved by `save_model` from `path`.

        `map_location` remaps tensors onto this agent's device, so a
        checkpoint saved on GPU loads correctly on a CPU-only machine
        (and vice versa).
        """
        checkpoint = torch.load(path, map_location=self.device)
        self.policy_net.load_state_dict(checkpoint['policy_net_state_dict'])
        self.target_net.load_state_dict(checkpoint['target_net_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.epsilon = checkpoint['epsilon']
        self.steps_done = checkpoint['steps_done']


def train(env, agent, num_episodes=2000):
    """Run `num_episodes` episodes of interaction and learning.

    For every environment step the transition is pushed into the agent's
    replay buffer and one training step is attempted. At the end of each
    episode the total reward, current epsilon, and (if available) the last
    training loss are printed and the environment is rendered.

    Returns:
        List of per-episode total rewards, one entry per episode.
    """
    episode_rewards = []

    for episode in range(num_episodes):
        state = env.reset()
        total_reward = 0
        done = False
        loss = None

        while not done:
            action = agent.select_action(state)
            next_state, reward, done, _ = env.step(action)

            agent.memory.push(state, action, reward, next_state, done)
            state = next_state
            total_reward += reward

            loss = agent.train_step()

        episode_rewards.append(total_reward)
        print(
            f"Episode {episode + 1}/{num_episodes}, Reward: {total_reward:.2f}, Epsilon: {agent.epsilon:.2f}")
        if loss is not None:
            print(f"Loss: {loss:.4f}")
        env.render()

    return episode_rewards