import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
from collections import deque
import matplotlib.pyplot as plt
import time
from tqdm import tqdm
import pandas as pd
import math
import torch.nn.functional as F
import os
from torch import nn
import yaml

# Script name without the ".py" suffix; used below to name the saved checkpoint.
file_name = os.path.basename(__file__)[:-3]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

import sys
from vehicle_cargo_environment import TransportMatchingEnv
from model import *
from utils import *

# Hyperparameters are stored under the top-level 'c' key of config.yaml.
with open('config.yaml', 'r', encoding="utf-8") as f:
    config = yaml.safe_load(f)
c = config['c']

# Per-update training losses; DQNAgent.train() appends to this for later analysis.
loss_list = []


class DQNAgent:
    """Deep Q-Network agent with a target network and a replay buffer.

    The Q-network is an LSTM-with-attention model (from model.py). Action
    selection is epsilon-greedy, restricted to the environment's currently
    valid actions.
    """

    def __init__(self, input_dim, action_dim, gamma=c['gamma'], epsilon=c['epsilon'], lr=c['lr']):
        # input_dim: size of the flattened state vector fed to the network.
        # action_dim: total number of discrete actions.
        self.input_dim = input_dim
        self.action_dim = action_dim
        self.gamma = gamma      # discount factor for bootstrapped targets
        self.epsilon = epsilon  # exploration rate, annealed via decrease_epsilon()
        self.lr = lr
        self.network = LSTMWithAttention(input_dim, action_dim).float().to(device)
        self.target_network = LSTMWithAttention(input_dim, action_dim).float().to(device)
        self.target_network.load_state_dict(self.network.state_dict())
        self.optimizer = optim.Adam(self.network.parameters(), lr=self.lr)
        # Replay buffer; oldest transitions are evicted FIFO once full.
        self.memory = deque(maxlen=20000)
        # Optional adaptive learning-rate scheduler (disabled):
        # self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=1000, gamma=0.9)

    def act(self, state, env=None):
        """Pick an epsilon-greedy action restricted to ``env.valid_actions()``.

        ``env`` defaults to the module-level environment so existing callers
        that pass only ``state`` keep working (original behavior).
        """
        if env is None:
            # Backward-compatible fallback to the global training environment.
            env = globals()['env']
        valid = env.valid_actions()
        if np.random.random() > self.epsilon:
            # np.array first avoids the slow/warned list-of-ndarray tensor path.
            state_t = torch.tensor(np.array([state]), dtype=torch.float32).to(device)
            with torch.no_grad():
                action_values = self.network(state_t).squeeze()
            # Greedy choice over valid actions only.
            best_idx = action_values[valid].argmax().item()
            return valid[best_idx]
        # Explore: uniform random choice among valid actions.
        return np.random.choice(valid)

    def act_test(self, state, env):
        """Same policy as act(), with the environment supplied explicitly."""
        return self.act(state, env)

    def remember(self, state, action, reward, next_state, done):
        """Store one (s, a, r, s', done) transition in the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def train(self, batch_size=64):
        """Run one gradient step on a random minibatch from the replay buffer.

        No-op until the buffer holds at least ``batch_size`` transitions.
        """
        if len(self.memory) < batch_size:
            return

        batch = random.sample(self.memory, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)

        states = torch.tensor(np.array(states), dtype=torch.float32).to(device)
        actions = torch.tensor(np.array(actions), dtype=torch.int64).to(device)
        rewards = torch.tensor(np.array(rewards), dtype=torch.float32).to(device)
        next_states = torch.tensor(np.array(next_states), dtype=torch.float32).to(device)
        dones = torch.tensor(np.array(dones), dtype=torch.float32).to(device)

        # Q(s, a) for the actions actually taken.
        current_values = self.network(states).gather(1, actions.unsqueeze(-1)).squeeze(-1)
        # Bootstrapped target from the (periodically hard-synced) target network.
        next_values = self.target_network(next_states).max(1)[0].detach()
        target_values = rewards + self.gamma * next_values * (1 - dones)

        loss = F.mse_loss(current_values, target_values)
        # Record the scalar loss for post-hoc plotting/diagnostics.
        loss_list.append(loss.detach().cpu().numpy())
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # Optional adaptive learning-rate step (disabled):
        # self.scheduler.step()

    def update_target_network(self):
        """Hard-copy the online network's weights into the target network."""
        self.target_network.load_state_dict(self.network.state_dict())

    def decrease_epsilon(self, decrement_value=0.001, min_epsilon=0.1):
        """Linearly anneal the exploration rate, clamped at ``min_epsilon``."""
        self.epsilon = max(self.epsilon - decrement_value, min_epsilon)


if __name__ == '__main__':
    # Training entry point: run `episodes` episodes of epsilon-greedy DQN on the
    # transport-matching environment, checkpointing the best-scoring model.
    start = time.time()
    # Re-binds the module-level `device` with the same value; redundant but harmless.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    rewards = []
    # NOTE(review): 10x10 presumably means 10 vehicles x 10 cargos — confirm in env.
    env = TransportMatchingEnv(10, 10)
    # 462 = state vector size, 100 = action-space size; must match the environment.
    agent = DQNAgent(462, 100)
    # Number of training episodes.
    episodes = c['episodes']
    num_loop_list = []
    # with Bar('progress', max=episodes, fill='#'):
    success_rate_list = []
    action_list = []
    rewards2 = []
    max_reward = -1  # best env.compute_reward() seen so far; gates checkpointing

    for episode in tqdm(range(episodes)):
        # print(f'epoch:{episode}')
        state = env.reset()
        done = False
        episode_reward = 0
        total_reward = 0
        num_loop = 0
        actions = []
        while not done:
            num_loop = num_loop + 1
            # original note says state shape is (432,) — TODO confirm vs input_dim=462
            action = agent.act(state)
            # print('main action: ', action)
            next_state, reward, done, negotiation = env.step(action)
            # Output the decision TODO
            agent.remember(state, action, reward, next_state, done)
            agent.train()  # one SGD step per env step (no-op until buffer fills)
            episode_reward += reward
            total_reward += reward
            state = next_state
            actions.append(action)

        rewards2.append(env.compute_reward())
        action_list.append(actions)
        # Checkpoint whenever this episode beats the best reward so far.
        if env.compute_reward() > max_reward:
            print('\n------------------\n更新保存的参数')
            max_reward = env.compute_reward()
            torch.save(agent.network.state_dict(), rf'saved_model/{file_name}.pth')
            print(rf'max_reward:{max_reward}')
        success_rate_list.append(np.mean(env.success_rate))

        num_loop_list.append(num_loop)
        agent.decrease_epsilon()
        rewards.append(total_reward / 10)
        # Hard-sync the target network every 50 episodes (skipping episode 0).
        if episode % 50 == 0 and episode != 0:
            agent.update_target_network()

    end = time.time()
    print(f'device: {device}')
    print(f'time: {end - start}')

    env.draw('21DLS_e1d1')
