import os

import numpy as np
import torch
import torch.nn as nn
import argparse
from model import *
from prepare_data import *
import torch.optim as optim
from focal_loss_example import *
import warnings
import time
import os
from utils import *
from torch.utils.data import DataLoader
warnings.filterwarnings('ignore')

if __name__ == '__main__':
    # Make CUDA kernel launches synchronous so errors surface at the failing call
    # (debugging aid; harmless on CPU-only machines).
    os.environ['CUDA_LAUNCH_BLOCKING'] = '1'

    # BUG FIX: the original called torch.cuda.set_device / .cuda() / cuda.manual_seed
    # unconditionally, which crashes on CPU-only hosts even though `device` was
    # computed from torch.cuda.is_available() (and then never used).
    use_cuda = torch.cuda.is_available()
    print(use_cuda)
    device = torch.device("cuda" if use_cuda else "cpu")
    if use_cuda:
        torch.cuda.set_device(0)

    # Fixed seed for reproducibility (CPU RNG always; GPU RNG only when present).
    SEED = 0
    torch.manual_seed(SEED)
    if use_cuda:
        torch.cuda.manual_seed(SEED)

    parser = argparse.ArgumentParser()

    parser.add_argument('--batchSize', type=int, default=10, help='input batch size')  # originally 10000, changed to 5000
    parser.add_argument('--hidden_size', type=int, default=1024, help='hidden state size')  # originally 200, changed to 500
    parser.add_argument('--epoch', type=int, default=10, help='number of epochs to train for')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
    # parser.add_argument('--l2', type=float, default=0.0001, help='l2 penalty')
    parser.add_argument('--num_layers', type=int, default=6, help='layers nums')
    parser.add_argument('--num_heads', type=int, default=6, help='attention heads nums')
    parser.add_argument('--mlp_ratio', type=float, default=1.0, help='the ratio of hidden layers in the middle')
    parser.add_argument('--Kernel_size1', type=int, default=2, help='the first layer convolution kernel size')
    parser.add_argument('--Kernel_size2', type=int, default=2, help='the second layer convolution kernel size')
    parser.add_argument('--Stride1', type=int, default=2, help='the first layer convolution stride size')
    parser.add_argument('--Stride2', type=int, default=2, help='the second layer convolution stride size')
    parser.add_argument('--num_classes', type=int, default=77, help='the number of categories')
    parser.add_argument('--num_classes_last', type=int, default=7, help='the number of categories to the last label')
    parser.add_argument('--photo_size', type=int, default=128, help='input image (photo) side length in pixels')
    parser.add_argument('--Linear_nums', type=int, default=3, help='number of linear layers')

    opt = parser.parse_args()

    train_set, test_set, train_dict, valid_set = get_data(opt.photo_size)

    # Train loader shuffles and drops the last partial batch; test keeps every sample.
    train_data = DataLoader(dataset=train_set, batch_size=opt.batchSize, collate_fn=collate_v2, shuffle=True,
                            pin_memory=True, num_workers=0, drop_last=True)
    test_data = DataLoader(dataset=test_set, batch_size=opt.batchSize, collate_fn=collate_v2, pin_memory=True,
                           num_workers=0, drop_last=False)
    # test_data = DataLoader(dataset=test_set, batch_size=opt.batchSize, collate_fn=collate_test, pin_memory=True,
    #                        num_workers=0, drop_last=False)
    valid_data = DataLoader(dataset=valid_set, batch_size=opt.batchSize, collate_fn=collate_v2, pin_memory=True,
                            num_workers=0, drop_last=True)

    # train_set, train_dict = StrengthenData(train_set, train_dict)
    # train_data = DataLoader(dataset=train_set, batch_size=opt.batchSize, collate_fn=collate, shuffle=True,
    #                         pin_memory=True, num_workers=0, drop_last=True)

    # Compute the flattened feature size after the conv/pool stack, using the
    # standard conv output formula out = (in - kernel + 2*pad) // stride + 1.
    embedding_dim = int((opt.photo_size - opt.Kernel_size1) // opt.Stride1 + 1)   # conv1 (no padding)
    embedding_dim = int((embedding_dim - 2) // 2 + 1)                             # pool, kernel 2 / stride 2
    embedding_dim = int((embedding_dim - opt.Kernel_size2 + 2) // opt.Stride2 + 1)  # conv2; the +2 presumably reflects padding=1 — TODO confirm against model
    embedding_dim = int((embedding_dim - 2) // 2 + 1)                             # pool, kernel 2 / stride 2
    embedding_dim = int(embedding_dim * embedding_dim * 3)                        # flatten H*W; *3 presumably channel count — verify in AesModel

    # BUG FIX: place the model on the selected device instead of unconditional
    # .cuda(), so the script also runs on CPU-only machines.
    model = AesModel(embedding_dim=embedding_dim, opt=opt).to(device)
    # optimizer = optim.Adam(model.parameters(), lr=opt.lr, weight_decay=opt.l2)
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    loss_func1 = nn.BCEWithLogitsLoss()

    # loss_func1 = FocalLoss()

    # loss_func1 = nn.BCELoss()
    # loss_func1 = nn.CrossEntropyLoss()
    # loss_func2 = nn.CrossEntropyLoss()
    loss_func2 = FocalLoss()
    # loss_func2 = nn.BCEWithLogitsLoss()
    # loss_func2 = nn.CTCLoss()
    start_time = time.time()  # typo fix: was `strat_time`
    # NOTE(review): the training loop below is commented out in the original,
    # so running this script only builds the data loaders and the model.
    # for epoch in range(opt.epoch):
    #     model, epoch_loss=train(model,train_data, train_dict, opt, epoch, loss_func1=loss_func1, loss_func2=loss_func2, optimizer=optimizer)
    #     # with open(f"{opt.photo_size}_{opt.Linear_nums}_{opt.lr}.txt", mode='a') as file:
    #     #     out = str(f"the {epoch} loss is {epoch_loss}\n")
    #     #     file.write(out)
    #     # test(model, valid_data)
    #     # test(model, test_data)
    #     # if epoch % 20 == 0:
    #     if epoch == opt.epoch - 1:

    #         print("Predicting start")
    #         # end_time = time.time()
    #         ACC_now, Recall_now, HR_now, MRR_now, NDCG_now = test(model,test_data)
    #         with open(f"result.txt", mode='a') as file:
    #             # out = str(f"mae is {mae}, mse is {mse}, rmse is {rmse}\n")
    #             # out = str(f"model runtime: {end_time - start_time}s")
    #             file.write(f'ACC:{ACC_now}, Recall:{Recall_now}, HR:{HR_now}, MRR:{MRR_now}, NDCG:{NDCG_now}')
    #         print("Predicting end")
    # torch.save(model.state_dict(), 'aes_model_weight.pth')