from __future__ import absolute_import, division, print_function


import random
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss
from toxicity_classification.model.model_bert import BertForSequenceClassification
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam
from toxicity_classification.data_preprocess.data_preprocessor_bert import *
from toxicity_classification.utils.matrics import *

logger = logging.getLogger(__name__)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
CATEGORY = "category.txt"


class ToxicityClassificationTask:
    """Fine-tune and run inference with a BERT sequence classifier for toxicity.

    Wraps the full lifecycle: device/seed/label setup (``__init__``),
    fine-tuning with optional dev-set evaluation (``train``), and batch
    prediction on the test split (``predict``).
    """

    def __init__(self, args):
        """Set up device, RNG seeds, label list, tokenizer from ``args``.

        Args:
            args: parsed command-line namespace; this class reads
                ``no_cuda, gpu_num, seed, do_train, do_eval, output_dir,
                data_dir, fine_tune_dir, vocab, do_lower_case, bert_model,
                max_seq_length, train_batch_size, eval_batch_size,
                num_train_epochs, learning_rate, warmup_proportion``.
        """
        self.args = args
        # gpu_num may be a single id or a comma-separated list ("0,2");
        # parse it exactly once instead of in three separate places.
        self.gpu_ids = [int(each.strip()) for each in str(self.args.gpu_num).split(',')]
        self.n_gpu = len(self.gpu_ids)
        if self.args.no_cuda:
            self.device = torch.device('cpu')
        else:
            # The first listed GPU hosts the (possibly DataParallel) model.
            self.device = torch.device('cuda', self.gpu_ids[0])

        # Seed every RNG the pipeline uses, not just the stdlib one, so
        # weight init / dropout / RandomSampler order are reproducible.
        random.seed(self.args.seed)
        np.random.seed(self.args.seed)
        torch.manual_seed(self.args.seed)
        if not self.args.no_cuda:
            torch.cuda.manual_seed_all(self.args.seed)

        if self.args.do_train and not os.path.exists(self.args.output_dir):
            os.makedirs(self.args.output_dir)

        self.processor = ToxicityProcessor()
        # Training reads labels from the raw data; inference reads them back
        # from the fine-tuned model directory written by train().
        label_dir = self.args.data_dir if self.args.do_train else self.args.fine_tune_dir
        self.label_list = self.processor.get_labels(label_dir)

        self.tokenizer = BertTokenizer.from_pretrained(self.args.vocab,
                                                       do_lower_case=self.args.do_lower_case)
        self.model = None

    def _features_to_dataset(self, features):
        """Pack BERT input features into a ``TensorDataset`` of long tensors.

        Returns a dataset yielding (input_ids, input_mask, segment_ids,
        label_id) per example.
        """
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
        return TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)

    def train(self):
        """Fine-tune BERT on the training split and save the checkpoint.

        Writes model weights, config, vocabulary and the label list to
        ``args.output_dir``.  When ``args.do_eval`` is set, also evaluates
        on the dev split and writes ``eval_results.txt``.
        """
        train_examples = self.processor.get_train_examples(self.args.data_dir)
        # ceil(len / batch) steps per epoch: the DataLoader keeps the final
        # partial batch, so truncating division under-counted t_total and
        # skewed BertAdam's warmup/decay schedule.
        steps_per_epoch = (len(train_examples) + self.args.train_batch_size - 1) \
            // self.args.train_batch_size
        num_train_optimization_steps = int(steps_per_epoch * self.args.num_train_epochs)

        self.model = BertForSequenceClassification.from_pretrained(
            self.args.bert_model, num_labels=len(self.label_list))
        if self.n_gpu > 1:
            self.model = torch.nn.DataParallel(self.model, device_ids=self.gpu_ids)
        # NOTE: the device chosen in __init__ is honoured here; the old code
        # re-forced a CUDA device in the single-GPU case even under --no_cuda.
        self.model.to(self.device)

        # Prepare optimizer: weight decay on everything except biases and
        # LayerNorm parameters, per standard BERT fine-tuning practice.
        param_optimizer = list(self.model.named_parameters())
        no_decay = ('bias', 'LayerNorm.bias', 'LayerNorm.weight')
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer
                        if not any(nd in n for nd in no_decay)],
             'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer
                        if any(nd in n for nd in no_decay)],
             'weight_decay': 0.0},
        ]
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=self.args.learning_rate,
                             warmup=self.args.warmup_proportion,
                             t_total=num_train_optimization_steps)

        train_features = convert_examples_to_features(train_examples,
                                                      self.label_list,
                                                      self.args.max_seq_length,
                                                      self.tokenizer,
                                                      "classification")
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", self.args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)

        train_data = self._features_to_dataset(train_features)
        train_dataloader = DataLoader(train_data,
                                      sampler=RandomSampler(train_data),
                                      batch_size=self.args.train_batch_size)

        loss_fct = CrossEntropyLoss()
        global_step = 0
        # Accumulate across ALL epochs so tr_loss / global_step below is a
        # true mean (the old code reset tr_loss each epoch but divided by
        # the total step count).
        tr_loss = 0.0
        self.model.train()
        for _ in trange(int(self.args.num_train_epochs), desc="Epoch"):
            for batch in tqdm(train_dataloader, desc="Iteration"):
                input_ids, input_mask, segment_ids, label_ids = \
                    (t.to(self.device) for t in batch)
                logits = self.model(input_ids, segment_ids, input_mask, labels=None)
                loss = loss_fct(logits.view(-1, len(self.label_list)),
                                label_ids.view(-1))
                loss = loss.mean()  # no-op on 1 GPU; averages DataParallel shards
                loss.backward()
                tr_loss += loss.item()
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1

        # Unwrap DataParallel before saving so the checkpoint can later be
        # reloaded with `from_pretrained` regardless of device layout.
        model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
        torch.save(model_to_save.state_dict(),
                   os.path.join(self.args.output_dir, WEIGHTS_NAME))
        model_to_save.config.to_json_file(
            os.path.join(self.args.output_dir, CONFIG_NAME))
        self.tokenizer.save_vocabulary(self.args.output_dir)
        with open(os.path.join(self.args.output_dir, CATEGORY), 'w') as fh:
            for cat in self.label_list:
                fh.write(str(cat) + '\n')

        if self.args.do_eval:
            self._evaluate(model_to_save, tr_loss, global_step)

    def _evaluate(self, model, tr_loss, global_step):
        """Evaluate ``model`` on the dev split and write ``eval_results.txt``.

        Args:
            model: the (unwrapped) classifier to score with.
            tr_loss: summed training loss, used for the reported mean loss.
            global_step: total optimizer steps taken during training.
        """
        eval_examples = self.processor.get_dev_examples(self.args.data_dir)
        eval_features = convert_examples_to_features(
            eval_examples, self.label_list, self.args.max_seq_length,
            self.tokenizer, "classification")
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", self.args.eval_batch_size)
        eval_data = self._features_to_dataset(eval_features)
        # Sequential sampling: predictions stay aligned with the examples.
        eval_dataloader = DataLoader(eval_data,
                                     sampler=SequentialSampler(eval_data),
                                     batch_size=self.args.eval_batch_size)

        model.eval()
        loss_fct = CrossEntropyLoss()
        eval_loss = 0.0
        nb_eval_steps = 0
        logit_chunks = []
        gold_chunks = []
        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(self.device)
            input_mask = input_mask.to(self.device)
            segment_ids = segment_ids.to(self.device)
            label_ids = label_ids.to(self.device)

            with torch.no_grad():
                logits = model(input_ids, segment_ids, input_mask, labels=None)

            tmp_eval_loss = loss_fct(logits.view(-1, len(self.label_list)),
                                     label_ids.view(-1))
            eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            logit_chunks.append(logits.detach().cpu().numpy())
            gold_chunks.append(label_ids.detach().cpu().numpy())

        eval_loss = eval_loss / nb_eval_steps
        preds = np.argmax(np.concatenate(logit_chunks, axis=0), axis=1)
        result = compute_metrics('Commodity-Classification', preds,
                                 np.concatenate(gold_chunks, axis=0))
        result['eval_loss'] = eval_loss
        result['global_step'] = global_step
        result['loss'] = tr_loss / global_step if self.args.do_train else None

        output_eval_file = os.path.join(self.args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

    def predict(self):
        """Classify the test split with the fine-tuned model.

        Returns:
            tuple: ``(preds, logits)`` — predicted label values (drawn from
            ``self.label_list``) and the raw logit matrix, one row per example.
        """
        self.model = BertForSequenceClassification.from_pretrained(
            self.args.fine_tune_dir, num_labels=len(self.label_list))
        self.tokenizer = BertTokenizer.from_pretrained(
            self.args.fine_tune_dir, do_lower_case=self.args.do_lower_case)
        self.model.to(self.device)
        if self.n_gpu > 1:
            self.model = torch.nn.DataParallel(self.model, device_ids=self.gpu_ids)

        eval_examples = self.processor.get_test_examples(self.args.data_dir)
        eval_features = convert_examples_to_features(
            eval_examples, self.label_list, self.args.max_seq_length,
            self.tokenizer, "classification")
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", self.args.eval_batch_size)
        eval_data = self._features_to_dataset(eval_features)
        eval_dataloader = DataLoader(eval_data,
                                     sampler=SequentialSampler(eval_data),
                                     batch_size=self.args.eval_batch_size)

        self.model.eval()
        logit_chunks = []
        for input_ids, input_mask, segment_ids, _ in tqdm(eval_dataloader, desc="Predicting"):
            input_ids = input_ids.to(self.device)
            input_mask = input_mask.to(self.device)
            segment_ids = segment_ids.to(self.device)
            with torch.no_grad():
                out = self.model(input_ids, segment_ids, input_mask, labels=None)
            logit_chunks.append(out.detach().cpu().numpy())

        logits = np.concatenate(logit_chunks, axis=0)
        pred_ids = np.argmax(logits, axis=1)
        preds = [self.label_list[i] for i in pred_ids]
        return preds, logits

