from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertModel
from load_data import *
from torch.utils.data import Dataset, DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
import os
import math
import re
from myModel import *
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score, f1_score, precision_score, recall_score
import random
from sklearn.model_selection import KFold

def run(
    model_name = 'bert-base-uncased',
    epochs=3, batch_size=16, dropout_rate=0.5, lr=2e-5, max_seq_len=300, hidden_size=768,
    n_class=2, seed=33,
    device = 'cuda:0'
):
    """Fine-tune the saved sentiment classifier on the labeled data.

    Seeds all RNGs, splits the labeled data 80/20 into train/validation,
    tokenizes both splits, loads the checkpoint from 'save/model.bin'
    (NOTE(review): fresh-model construction was commented out upstream;
    this always resumes from the checkpoint), and runs `train`.
    """
    # Seed every RNG used here so the split and shuffling are reproducible.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

    # Load the tokenizer once (it was previously loaded twice).
    tokenizer = BertTokenizer.from_pretrained(model_name)
    labeled_df, test_df = get_data()

    # 80/20 split with a fixed random_state for reproducibility.
    train_x, valid_x, train_y, valid_y = train_test_split(
        labeled_df['review'], labeled_df[['labels']], test_size=0.2, random_state=seed
    )

    train_tokens = tokenizer(train_x.tolist(), truncation=True, padding=True, max_length=max_seq_len)
    valid_tokens = tokenizer(valid_x.tolist(), truncation=True, padding=True, max_length=max_seq_len)

    train_dataset = my_Dataset(train_tokens, train_y.reset_index(drop=True), train_x.tolist())
    train_iter = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    valid_dataset = my_Dataset(valid_tokens, valid_y.reset_index(drop=True), valid_x.tolist())
    valid_iter = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)

    # Resume fine-tuning from the saved checkpoint.
    model = Baseline_Model.load('save/model.bin')
    print('Load trained model success')

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    loss_func = nn.CrossEntropyLoss()

    model.to(device)
    model = train(
        train_iter, tokenizer, model, optimizer, loss_func, epochs, batch_size,
        dropout_rate, lr, max_seq_len, hidden_size, device, valid_iter
    )
    

def train(
    train_iter, tokenizer = None, model = None, optimizer = None, loss_func = None, 
    epochs=3, batch_size=16, dropout_rate=0.5, lr=2e-5, max_seq_len=300, hidden_size=768,
    device='cuda', valid_iter = None, lrr=0, patience=2,
    load=False, save_path='save/model.bin'
):
    """Train `model` on `train_iter`, evaluating on `valid_iter` after each epoch.

    The learning rate is halved every 3rd epoch. Early stopping triggers when
    the *training* loss fails to improve for more than `patience` consecutive
    epochs. NOTE(review): the validation accuracy returned by `evaluate` is
    discarded — stopping on validation performance would be the usual choice;
    confirm before changing behavior.

    Returns the model after training; the final weights (not necessarily the
    best-epoch weights) are saved to `save_path`.
    """
    print('-'*100)
    print('Begin training!')
    model.to(device)
    model.train()
    best_loss = float('inf')  # lowest epoch training loss observed so far
    bad_epochs = 0            # consecutive epochs without improvement
    # Project-specific hook — presumably (un)freezes layers; verify in myModel.
    model.set_grad()
    for epoch in range(1, 1+epochs):
        model.train()
        tot_loss = 0.
        # Halve the learning rate every 3rd epoch (epoch counts from 1).
        if epoch % 3 == 0:
            lr *= 0.5
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        for tr_batch, content in tqdm(train_iter):
            input_ids = tr_batch['input_ids'].to(device)
            token_type_ids = tr_batch['token_type_ids'].to(device)
            attention_mask = tr_batch['attention_mask'].to(device)
            label = tr_batch['labels'].to(device)

            optimizer.zero_grad()

            # NOTE(review): positional order here (ids, token_type, mask)
            # differs from evaluate()/predict() (ids, mask, token_type) —
            # verify against the model's forward signature.
            output = model(input_ids, token_type_ids, attention_mask, istrain=True)
            loss = loss_func(output, label)
            loss.backward()
            # Clip gradients to stabilize fine-tuning.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 2.5)
            optimizer.step()

            tot_loss += loss.item()
            # Per-batch cache release keeps peak GPU memory low at some speed cost.
            torch.cuda.empty_cache()
        print('==================')
        print('epoch %d loss %f' %(epoch, tot_loss))
        evaluate(model, valid_iter)
        if tot_loss < best_loss:
            best_loss = tot_loss
            bad_epochs = 0
        else:
            bad_epochs += 1
            if bad_epochs > patience:
                break

    model.save(save_path)
    return model

def test():
    """Predict sentiment for test_data.csv with the saved model and write submission_test.csv."""
    model_name = 'bert-base-uncased'
    max_seq_len = 300
    batch_size = 16
    seed = 33
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    tokenizer = BertTokenizer.from_pretrained(model_name)

    df_test = pd.read_csv('test_data.csv')
    # Strip literal HTML line breaks left over in the raw reviews.
    df_test['review'] = df_test['review'].str.replace("<br />", "")
    # .copy() so adding columns below does not trigger SettingWithCopyWarning
    # (and reliably writes to this frame, not a view of df_test).
    test_df = df_test[['review']].copy()

    test_x = test_df['review']
    test_tokens = tokenizer(test_x.tolist(), truncation=True, padding=True, max_length=max_seq_len)
    test_dataset = my_Dataset(test_tokens, None, test_x.tolist(), test=True)
    # shuffle=False keeps predictions aligned with the CSV row order.
    test_iter = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    model = Baseline_Model.load('save/model.bin')
    print('Load model success')
    res = np.array(predict(model, test_iter))

    # Map class index -> label string (1 == positive).
    test_df['sentiment'] = res
    test_df['sentiment'] = test_df['sentiment'].apply(lambda x: 'positive' if x == 1 else 'negative')
    submission_df = pd.read_csv('submission.csv')
    submission_df['sentiment'] = test_df['sentiment']
    submission_df.to_csv('submission_test.csv', index=False)
    print('save result to submission_test.csv')

def evaluate(model, iter, device='cuda'):
    """Run `model` over a labeled DataLoader and print accuracy/F1/precision/recall.

    Args:
        model: classifier whose forward returns per-class logits.
        iter: iterable of (batch_dict, content) pairs; batch_dict must contain
            'input_ids', 'attention_mask', 'token_type_ids', and 'labels'.
        device: device to run inference on.

    Returns:
        The accuracy over all batches.
    """
    print('valid..........................................')
    model.to(device)
    model.eval()
    labels = []
    res = []

    with torch.no_grad():
        for val_batch, content in iter:
            input_ids = val_batch['input_ids'].to(device)
            attention_mask = val_batch['attention_mask'].to(device)
            token_type_ids = val_batch['token_type_ids'].to(device)
            label = val_batch['labels'].to(device)
            # NOTE(review): positional order (ids, mask, token_type) differs
            # from train() — verify against the model's forward signature.
            output = model(input_ids, attention_mask, token_type_ids)
            # (A per-batch cross-entropy was computed here before but never
            # used; removed as dead work.)
            res += torch.argmax(output, dim=1).tolist()
            labels += label.tolist()

    labels = np.array(labels)

    accuracy = accuracy_score(labels, res)
    print('accuracy is %f' % (accuracy))
    f1 = f1_score(labels, res)
    pre = precision_score(labels, res)
    rec = recall_score(labels, res)
    print('f1 is %f' %(f1))
    print('precision is %f' %(pre))
    print('recall is %f' %(rec))
    return accuracy

def predict(model, val_iter, device='cuda', return_Mid=False):
    """Predict a class index for every example in `val_iter`.

    Args:
        model: classifier whose forward returns per-class logits.
        val_iter: iterable of (batch_dict, content) pairs; batch_dict must
            contain 'input_ids', 'attention_mask', 'token_type_ids'.
        device: device to run inference on.
        return_Mid: if True, return the raw concatenated model outputs
            (logits) instead of argmax class indices.

    Returns:
        np.ndarray of predicted class indices, or a tensor of raw outputs
        when return_Mid is True (None if the iterator was empty, matching
        the previous behavior).
    """
    print('predicting.......................................')
    model.to(device)
    model.eval()
    preds = []
    raw_outputs = []  # per-batch outputs; concatenated once at the end (O(n), vs cat-in-loop O(n^2))
    with torch.no_grad():
        for val_batch, content in val_iter:
            input_ids = val_batch['input_ids'].to(device)
            attention_mask = val_batch['attention_mask'].to(device)
            token_type_ids = val_batch['token_type_ids'].to(device)
            output = model(input_ids, attention_mask, token_type_ids)
            raw_outputs.append(output)
            preds += torch.argmax(output, dim=1).tolist()
    if return_Mid:
        return torch.cat(raw_outputs) if raw_outputs else None
    return np.array(preds)


if __name__=='__main__':
    # Fine-tune the saved checkpoint, then write predictions for submission.
    run()
    # get_data()
    test()
    
