import gymnasium as gym
from dense01 import Dense
import torch
import random
from tqdm import tqdm
import numpy as np

def setup_seed(seed):
    """Seed every RNG used by this script for reproducible runs.

    Seeds torch (CPU and, when available, all CUDA devices), NumPy, and
    Python's ``random`` module.  ``manual_seed_all`` is a no-op without CUDA,
    so this is safe on CPU-only machines.
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)

# Fixed seed so training runs are reproducible.
setup_seed(354)

# Policy network: 4 inputs / 2 outputs matches CartPole-v1's 4-dim
# observation and 2 discrete actions.
# NOTE(review): the main loop feeds the network output to
# np.random.choice(p=...), which requires a probability vector — confirm
# that Dense ends in a softmax; CrossEntropyLoss below expects raw logits,
# so one of the two usages looks inconsistent.
agent = Dense(4, 2)
agent.train()
loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(agent.parameters(), lr=0.01)

def gen_train_data(o_list, a_list, r_list):
    """Select the elite episodes to imitate (cross-entropy-method style).

    Args:
        o_list: per-episode observation sequences.
        a_list: per-episode action sequences (parallel to ``o_list``).
        r_list: per-episode total rewards (parallel to the other two).

    Returns:
        The top 30% of episodes by total reward, each as
        ``[observations, actions, reward]``, best first.  At least one
        episode is kept when any exist (the original ``int(len * 0.3)``
        returned an empty elite set for fewer than 4 episodes, which made
        the subsequent training step a no-op).
    """
    episodes = [[o, a, r] for o, a, r in zip(o_list, a_list, r_list)]
    episodes.sort(key=lambda e: e[2], reverse=True)
    keep = max(1, int(len(episodes) * 0.3))
    return episodes[:keep]

def learn(t_data):
    """Behavioral-cloning pass: fit the policy to the elite episodes.

    Performs one SGD step per (observation, action) pair using the
    module-level ``agent``, ``loss`` and ``optimizer``.

    Args:
        t_data: elite episodes as ``[observations, actions, reward]``
            triples, e.g. the output of ``gen_train_data``.

    Returns:
        The summed cross-entropy loss over all trained pairs, so callers
        can monitor progress (the original accumulated this value but
        never exposed it).
    """
    # NOTE(review): CrossEntropyLoss expects raw logits, but the main loop
    # treats agent output as probabilities (np.random.choice p=...) — one
    # of the two assumptions about Dense's final layer is off; confirm.
    running_loss = 0.0
    for obs_seq, act_seq, _reward in t_data:
        for obs, act in zip(obs_seq, act_seq):
            obs_t = torch.tensor(obs).float()
            act_t = torch.tensor(act).long()
            optimizer.zero_grad()
            output = agent(obs_t)
            step_loss = loss(output, act_t)
            step_loss.backward()
            optimizer.step()
            running_loss += step_loss.item()
    return running_loss

if __name__ == '__main__':
    env = gym.make("CartPole-v1")

    BATCH_EPISODES = 16   # episodes collected per training iteration
    SOLVED_REWARD = 500   # CartPole-v1 truncates episodes at 500 steps

    # Episodes accumulated since the last training step.
    ob_list = []
    action_list = []
    re_list = []

    training = True
    while training:
        # Start a fresh episode.  (Bug fix: the original discarded the
        # observation returned by env.reset(), so each new episode began
        # from the previous episode's terminal observation and replayed
        # its stale action, and the real first observation was never
        # recorded.)
        observation, info = env.reset()
        # NOTE(review): np.random.choice(p=...) requires the agent output
        # to be a valid probability vector — confirm Dense ends in a
        # softmax.  The original picked the very first action by argmax
        # and every later one by sampling; sampling is used consistently
        # here.
        probs = agent(torch.from_numpy(observation).float()).detach().numpy()
        action = np.random.choice(len(probs), p=probs)
        ep_ob_list = [observation]
        ep_ac_list = [action]
        reward_sum = 0
        episode_over = False

        while not episode_over:
            observation, reward, terminated, truncated, info = env.step(action)
            reward_sum += reward
            episode_over = terminated or truncated
            if not episode_over:
                # Record the next (observation, action) pair.  The
                # original also recorded a pair for the terminal
                # observation whose action was never executed; that
                # spurious pair is skipped here.
                probs = agent(torch.from_numpy(observation).float()).detach().numpy()
                action = np.random.choice(len(probs), p=probs)
                ep_ob_list.append(observation)
                ep_ac_list.append(action)

        ob_list.append(ep_ob_list)
        action_list.append(ep_ac_list)
        re_list.append(reward_sum)

        if len(re_list) == BATCH_EPISODES:
            mean_reward = sum(re_list) / BATCH_EPISODES
            if mean_reward >= SOLVED_REWARD:
                # NOTE(review): saving the whole module pickles the class
                # path; agent.state_dict() would be more portable.
                torch.save(agent, "model.pth")
                training = False
            else:
                t_data = gen_train_data(ob_list, action_list, re_list)
                learn(t_data)
                print(f"\r{mean_reward}", end="")
                ob_list = []
                action_list = []
                re_list = []

    env.close()
