from data import to_data_loader, load_data
from models import (Inception, RCNN, BiLSTM, TextCNN,
                    CNN_BiLSTM, AttnCNN, RNN, TestM)
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from train import train
import gc
from common.configs.path import paths
from common.configs.tools import reversed_label, set_seed, predict, weights_init_uniform_rule
import torch.optim as optim
import torch.nn as nn
import torch
import numpy as np
import pandas as pd
import random
from tqdm import tqdm
from argparse import ArgumentParser

import warnings

# Seed RNGs once at import time so runs are reproducible.  set_seed lives in
# common.configs.tools; presumably it seeds python/numpy/torch — verify there.
set_seed(42)


def fxn():
    """Emit a sample DeprecationWarning (used below to exercise the filter)."""
    warnings.warn("deprecated", category=DeprecationWarning)


# Silence the DeprecationWarning that fxn() emits during start-up.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    fxn()

# Pick the compute device once and report it.
cuda_available = torch.cuda.is_available()
if cuda_available:
    print('gpu is available: {}'.format(torch.cuda.get_device_name(0)))
    print('device count: {}'.format(torch.cuda.device_count()))

device = torch.device('cuda' if cuda_available else 'cpu')
print('Using device:', device)
print()


def main():
    parser = ArgumentParser(description='')
    parser.add_argument('--embed_num', type=int, help='vocab size',
                        default=0)
    parser.add_argument('--embed_dim', type=int, help='embed dim',
                        default=200)
    parser.add_argument('--learning_rate', type=float, help='learning rate',
                        default=0.25)
    parser.add_argument('--class_num', type=int, help='class num',
                        default=35)
    parser.add_argument('--kernel_num', type=int, help='kernel num',
                        default=50)
    parser.add_argument('--kernel_sizes', type=str, help='kernel sizes',
                        default="3,4,5,6,7,8,9")
    parser.add_argument('--cuda', type=bool, help='cuda',
                        default=torch.cuda.is_available())
    parser.add_argument('--dropout', type=float, help='dropout',
                        default=0.5)
    parser.add_argument('--dropout_embed', type=float, help='dropout',
                        default=0.5)
    parser.add_argument('--batch_normalizations', type=bool, help='batch_normalizations',
                        default=True)
    parser.add_argument('--bath_norm_momentum', type=float, help='momentum',
                        default=0.1)
    parser.add_argument('--batch_norm_affine', type=bool, help='affine',
                        default=True)
    parser.add_argument('--max_norm', type=float, help='max_norm',
                        default=None)
    parser.add_argument('--word_Embedding', type=bool, help='word_embedding',
                        default=False)
    parser.add_argument('--pretrained_weight', type=bool, help='pretrained_weight',
                        default=True)
    parser.add_argument('--wide_conv', type=bool, help='wide conv',
                        default=True)
    parser.add_argument('--init_weight', type=bool, help='init weight',
                        default=False)
    parser.add_argument('--init_weight_value', type=float, help='init_weight_value',
                        default=2.0)
    parser.add_argument('--device', type=str, help='device',
                        default=device)
    parser.add_argument('--lstm_hidden_dim', type=int, help='lstm_hidden_dim',
                        default=100)
    parser.add_argument('--lstm_num_layers', type=int, help='lstm_num_layers',
                        default=4)
    parser.add_argument('--linear_hidden_size', type=int, help='linear_hidden_size',
                        default=1000)
    parser.add_argument('--epochs', type=int, help='epochs',
                        default=50)
    parser.add_argument('--batch_size', type=int, help='batch size',
                        default=16)
    parser.add_argument('--paddingId', type=int, help='padding id',
                        default=0)
    parser.add_argument('--gram', type=int, help='text gram',
                        default=1)
    parser.add_argument('--k', type=int, help='k fold',
                        default=5)
    parser.add_argument('--save', type=bool, help='if save',
                        default=False)
    parser.add_argument('--predict', type=bool, help='if predict',
                        default=False)
    parser.add_argument('--model', type=str, help='model',
                        default='TextCNN1d')
    parser.add_argument('--cv', type=bool, help='if cross validation',
                        default=False)
    parser.add_argument('--savecp', type=bool, help='if save checkpoint',
                        default=False)

    args = parser.parse_args()

    args.kernel_sizes = [int(s) for s in args.kernel_sizes.split(',')]

    train_texts, input_ids, test_texts, labels, word2idx, embeddings = load_data(
        args.gram, max_len=64)

    args.embed_num = len(word2idx)

    args.pretrained_weight = embeddings

    if args.model == 'TextCNN1d':
        model = TextCNN.TextCNN1d(args)
    elif args.model == 'TextCNN2d':
        model = TextCNN.TextCNN2d(args)
    elif args.model == 'DeepCNN':
        model = TextCNN.DeepCNN(args)
    elif args.model == 'CNN_BiLSTM':
        model = CNN_BiLSTM.CBiLSTM(args)
    elif args.model == 'AttnCNN':
        model = AttnCNN.AttentionCNN(args)
    elif args.model == 'BiLSTMAttn':
        model = RNN.BiLSTMAttn(args)
    elif args.model == 'BiLSTM':
        model = BiLSTM.BiLSTM(args)
    elif args.model == 'BiGRU':
        model = BiLSTM.BiGRU(args)
    elif args.model == 'RCNN':
        model = RCNN.RCNN(args, 2)
    elif args.model == 'InCNN':
        model = Inception.InCNN(args)
    elif args.model == 'TestM':
        model = TestM.TestM(args)

    print("[model]", args.model)
    print("[epochs]", args.epochs)
    print("[batch size]", args.batch_size)
    print("[learning rate]", args.learning_rate)
    print("[dropout]", args.dropout)
    print("[embedding]", args.word_Embedding)
    print("[gram]", args.gram)
    print("[predict]", args.predict)
    print("[save]", args.save)
    print()

    # model.to(device)
    model.apply(weights_init_uniform_rule)
    optimizer = optim.Adadelta(
        model.parameters(), lr=args.learning_rate, rho=0.95)

    loss_fn = nn.CrossEntropyLoss()

    X_train, X_val, y_train, y_val = train_test_split(
        input_ids, labels, test_size=0.1, random_state=42)

    print("Start training...\n")

    if not args.cv:
        model.to(device)
        train_dataloader, val_dataloader = to_data_loader(
            X_train.astype(float), X_val.astype(float), y_train, y_val, batch_size=args.batch_size)

        best_f1, best_accuracy = train(args, model, optimizer, loss_fn, train_dataloader,
                                       val_dataloader, device=device, epochs=args.epochs)
        print("best f1: {}, best accuracy: {}".format(
            round(best_f1, 4), round(best_accuracy, 4)))
        print()
    else:

        skf = StratifiedKFold(n_splits=args.k, random_state=31, shuffle=True)
        for k, (train_index, test_index) in enumerate(skf.split(X_train, y_train)):
            model.to(device)
            print("[{}]".format(k), "TRAIN:",
                  train_index.shape, "TEST:", test_index.shape)
            train_inputs, val_inputs = input_ids[train_index], input_ids[test_index]
            train_labels, val_labels = labels[train_index], labels[test_index]

            train_dataloader, val_dataloader = to_data_loader(
                train_inputs.astype(float), val_inputs.astype(float), train_labels, val_labels, batch_size=args.batch_size)

            best_f1, best_accuracy = train(optimizer, loss_fn, train_dataloader,
                                           val_dataloader, device=device, epochs=args.epochs)

            pred_val = [torch.argmax(model.to("cpu").forward(
                torch.tensor(x_v).unsqueeze(dim=0)), dim=1).flatten()[0] for x_v in X_val]
            pred_val = np.array(pred_val).flatten()
            vali_f1 = round(f1_score(y_val, pred_val, average='macro'), 4)

            print("[{}] best f1: {}, best accuracy: {}, valid f1: {}".format(
                k, round(best_f1, 4), round(best_accuracy, 4), vali_f1))
            print()

    if args.save:
        torch.save(model, 'common/models/' + '{}_[f1]{}_[ep]{}_[lr]{}_[gram]{}_[Emd]{}.model'.format(args.model,
                                                                                                     str(round(best_f1, 4)).replace(
                                                                                                         '.', ''),
                                                                                                     str(round(
                                                                                                         args.learning_rate, 4)).replace('.', ''),
                                                                                                     args.epochs,
                                                                                                     args.gram,
                                                                                                     args.word_Embedding))
        with open('common/models/{}.log'.format(args.model), 'a', encoding='utf-8') as f:
            f.write('[gram] {}\n[batch size] {}\n[learning rate] {}\n[dropout] {}\nBest F1: {}\n\n'.format(
                args.gram, args.batch_size, args.learning_rate, args.dropout, str(round(best_f1, 4))))

    if args.predict:
        output = pd.DataFrame(columns=['id', 'label'])

        for i, text in tqdm(enumerate(test_texts)):
            label = reversed_label[predict(
                text, model=model, word2idx=word2idx).numpy()[0]]
            output.loc[i] = [i, label]
        output.to_csv('result_{}_[f1]{}_[ep]{}_[lr]{}_[gram]{}_[Emd]{}.csv'.format(args.model,
                                                                                   str(round(best_f1, 4)).replace(
                                                                                       '.', ''),
                                                                                   str(round(
                                                                                       args.learning_rate, 4)).replace('.', ''),
                                                                                   args.epochs,
                                                                                   args.gram,
                                                                                   args.word_Embedding), index=False)

    gc.collect()
    torch.cuda.empty_cache()


if __name__ == "__main__":
    # Run training only when executed as a script, not when imported.
    main()
