import os

import numpy as np
import torch
import torch.nn as nn
import argparse
from model import *
from prepare_data import *
import torch.optim as optim
from focal_loss_example import *
from RL_model import RL_model
from baseline_models import Base_model
from Actor_Critic_model import AC_model
import warnings
import time
import os
from utils import *
warnings.filterwarnings('ignore')

if __name__ == '__main__':
    # Load the picture-book aesthetic-dimension metadata before training starts.
    get_pbs_dimension('./data/1.xlsx')

    # Synchronous CUDA launches: kernel errors surface at the offending call,
    # which makes GPU debugging tractable (at some speed cost).
    os.environ['CUDA_LAUNCH_BLOCKING'] = '1'

    # Guard every CUDA call behind an availability check so the script also
    # runs on CPU-only machines (the original unconditionally called
    # set_device(0), which raises when no GPU is present).
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        torch.cuda.set_device(0)
    device = torch.device("cuda" if use_cuda else "cpu")

    # Fixed seed for reproducibility (CPU and, when present, GPU RNG streams).
    SEED = 0
    torch.manual_seed(SEED)
    if use_cuda:
        torch.cuda.manual_seed(SEED)

    parser = argparse.ArgumentParser()

    # --- training hyper-parameters --------------------------------------
    # NOTE(review): --batchSize and --batch_size coexist in the original CLI;
    # both are kept for backward compatibility. Keep their defaults in sync.
    parser.add_argument('--batchSize', type=int, default=20, help='input batch size (used by the DataLoaders below)')
    parser.add_argument('--batch_size', type=int, default=20, help='input batch size (duplicate of --batchSize, kept for compatibility)')
    parser.add_argument('--hidden_size', type=int, default=200, help='hidden state size')
    parser.add_argument('--hidden_dim', type=int, default=100, help='hidden layer dimension')
    parser.add_argument('--epoch', type=int, default=5, help='number of epochs to train for')
    parser.add_argument('--lr', type=float, default=0.1, help='learning rate')

    # --- network architecture -------------------------------------------
    parser.add_argument('--num_layers', type=int, default=6, help='number of layers')
    parser.add_argument('--num_heads', type=int, default=6, help='number of attention heads')
    parser.add_argument('--mlp_ratio', type=float, default=1, help='ratio of the hidden layers in the middle MLP')
    parser.add_argument('--Kernel_size1', type=int, default=2, help='first convolution layer kernel size')
    parser.add_argument('--Kernel_size2', type=int, default=2, help='second convolution layer kernel size')
    parser.add_argument('--Stride1', type=int, default=2, help='first convolution layer stride')
    parser.add_argument('--Stride2', type=int, default=2, help='second convolution layer stride')
    parser.add_argument('--num_classes', type=int, default=77, help='number of categories')
    parser.add_argument('--num_classes_last', type=int, default=7, help='number of categories of the last label')
    parser.add_argument('--photo_size', type=int, default=128, help='input image size (pixels per side)')
    parser.add_argument('--Linear_nums', type=int, default=3, help='number of linear layers')
    parser.add_argument('--embedding_dim', type=int, default=50, help='embedding dimension')

    # --- data / reinforcement-learning settings --------------------------
    parser.add_argument('--pb_path', type=str, default='./data', help='path of the picture-book data')
    parser.add_argument('--state_nums', type=int, default=77, help='number of aesthetic dimensions in the RL state')
    parser.add_argument('--gamma', type=float, default=0.99, help='reinforcement-learning discount rate')
    parser.add_argument('--epsilon', type=float, default=0.8, help='probability of epsilon-greedy exploitation')
    parser.add_argument('--target_update_nums', type=int, default=5, help='steps between target-network updates')
    parser.add_argument('--least_score', type=float, default=0.8, help='threshold used for screening candidates')
    parser.add_argument('--ReplayBuffer_capacity', type=int, default=100, help='replay buffer capacity')
    parser.add_argument('--min_size', type=int, default=50, help='minimum replay-buffer size before learning starts')
    parser.add_argument('--path_len', type=int, default=5, help='number of recommended picture books')
    parser.add_argument('--D', type=float, default=1.7, help='model constant D (TODO: document its exact meaning)')
    parser.add_argument('--a', type=int, default=5, help='model constant a (TODO: document its exact meaning)')

    # --- experiment bookkeeping ------------------------------------------
    parser.add_argument('--model_name', type=str, default='Aes', help='model name used for logging/saving')
    parser.add_argument('--mu', type=float, default=1, help='loss weighting coefficient mu')
    parser.add_argument('--msepara', type=int, default=100, help='MSE loss scaling parameter')
    parser.add_argument('--expand_name', type=str, default='embedding_dim', help='name of the hyper-parameter being swept')
    parser.add_argument('--is_adjust_parameter', type=str, default='true', help='whether this run is a parameter-tuning run')

    opt = parser.parse_args()

    # Build train/test DataLoaders for the RL pipeline. drop_last=True on both
    # so every batch has exactly opt.batchSize samples.
    train_set, test_set = get_RL_data(opt.photo_size)
    train_data = DataLoader(dataset=train_set, batch_size=opt.batchSize, collate_fn=collate_RL, pin_memory=True,
                            num_workers=0, drop_last=True)
    test_data = DataLoader(dataset=test_set, batch_size=opt.batchSize, collate_fn=collate_RL, pin_memory=True,
                           num_workers=0, drop_last=True)

    # Flattened feature-map size after two conv + 2x2 pooling stages over a
    # photo_size x photo_size image. NOTE(review): the '+ 2' in the third step
    # suggests conv2 uses padding=1, and the final '* 3' suggests 3 channels —
    # confirm against the model definition.
    embedding_dim = int((opt.photo_size - opt.Kernel_size1) // opt.Stride1 + 1)
    embedding_dim = int((embedding_dim - 2) // 2 + 1)
    embedding_dim = int((embedding_dim - opt.Kernel_size2 + 2) // opt.Stride2 + 1)
    embedding_dim = int((embedding_dim - 2) // 2 + 1)
    embedding_dim = int(embedding_dim * embedding_dim * 3)

    # Actor-Critic model; .to(device) instead of the original .cuda() so the
    # script does not crash when no GPU is available.
    model = AC_model(opt, embedding_dim).to(device)
    for epoch in range(opt.epoch):
        model.fit(train_data)
        # Evaluate only after the final epoch.
        if epoch == opt.epoch - 1:
            model.evaluate(test_data)