import os
import sys

root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root_path)
import torch
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score, classification_report
from torch import nn
from torch.optim import AdamW
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import BertTokenizer, BertModel, BertConfig
from utils import id2label, ensure_pdir_exist
from config import conf
from m_config import m_conf
from bert_yt.my_bert import MyDataset, device, pretty_draw

# Load the pretrained BERT artifacts once at module import time.
# BUG FIX: the original called BertConfig(m_conf.pretrained_model_path),
# which passes the *path string* as `vocab_size` (the first positional
# argument of BertConfig.__init__). from_pretrained() reads the config
# shipped with the checkpoint, so pt_config.hidden_size matches the
# actual model instead of silently using the library defaults.
pt_config = BertConfig.from_pretrained(m_conf.pretrained_model_path)
pt_tokenizer = BertTokenizer.from_pretrained(m_conf.pretrained_model_path)
pt_model = BertModel.from_pretrained(m_conf.pretrained_model_path)


def collate_fn(batch):
    """Tokenize a batch of (text, label) pairs into padded tensors.

    Returns (input_ids, attention_mask, labels) — the first two padded to
    the longest sequence in the batch (capped at m_conf.max_length).
    """
    texts, labels = zip(*batch)
    encoded = pt_tokenizer.batch_encode_plus(
        list(texts),
        return_tensors='pt',
        max_length=m_conf.max_length,
        padding=True,
        truncation=True,
    )
    return encoded['input_ids'], encoded['attention_mask'], torch.LongTensor(list(labels))


def get_dataloader(dataset):
    """Wrap *dataset* in a shuffling DataLoader using the shared collate_fn."""
    loader = DataLoader(
        dataset,
        batch_size=m_conf.batch_size,
        collate_fn=collate_fn,
        shuffle=True,
    )
    return loader


class MyModel(nn.Module):
    """BERT encoder plus a linear classification head.

    When fix_bert_weights is True the encoder is run under no_grad, so
    only the head produces gradients during training.
    """

    def __init__(self, fix_bert_weights=True):
        super().__init__()
        self.fix_bert_weights = fix_bert_weights
        self.bert = pt_model
        self.fc = nn.Linear(pt_config.hidden_size, len(id2label))

    def forward(self, input_ids, attention_mask):
        # Encode; skip autograd graph construction when the encoder is frozen.
        if self.fix_bert_weights:
            with torch.no_grad():
                encoded = self.bert(input_ids, attention_mask)
        else:
            encoded = self.bert(input_ids, attention_mask)
        # Classify from the [CLS] (first token) representation.
        return self.fc(encoded.last_hidden_state[:, 0])


def validata_on_dev(model, val_dataloader):
    """Compute and print micro-F1 on the dev set; returns the score.

    Fixes vs. the original: model.eval() is called once instead of on
    every batch, the whole loop runs under a single no_grad scope, and
    the labels are no longer moved to the device just to be .tolist()'d.
    Note: the model is left in eval mode — the training loop re-enables
    train mode per batch, so callers are unaffected.
    """
    model.eval()
    pred_list = []
    label_list = []
    with torch.no_grad():
        for input_ids, attention_mask, labels in val_dataloader:
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            pred = model(input_ids, attention_mask)
            pred_list += pred.argmax(dim=1).tolist()
            label_list += labels.tolist()
    f1 = f1_score(label_list, pred_list, average='micro')
    print(f'f1 on dev = {f1:.4f}')
    return f1


def train(fix_bert_weights=True):
    """Train MyModel, checkpointing whenever the dev micro-F1 improves.

    Args:
        fix_bert_weights: when True, the BERT encoder is frozen and only
            the linear head is trained.

    Fixes vs. the original:
    - total_loss accumulates loss.item() (a plain float) instead of the
      loss tensor; the old `total_loss += loss` chained autograd add
      nodes and retained tensors across the whole epoch.
    - The encoder is frozen *before* the optimizer is built, and via
      model.bert rather than the module-level pt_model global.
    - tqdm description is a plain string (the f-string had no fields).
    """
    dataloader = get_dataloader(MyDataset(conf.train_path))
    val_dataloader = get_dataloader(MyDataset(conf.val_path))
    model = MyModel(fix_bert_weights).to(device)

    # Freeze the encoder before the optimizer captures the parameters.
    if fix_bert_weights:
        for p in model.bert.parameters():
            p.requires_grad_(False)

    optimizer = AdamW(model.parameters(), lr=m_conf.lr)
    criterion = nn.CrossEntropyLoss()

    epoch_dev_max_f1 = 0.0
    train_f1 = []
    dev_f1 = []
    x_label = []
    for i in range(m_conf.epochs):
        total_loss = 0.0
        total_item = 0
        pred_list = []
        label_list = []
        batch_dev_final_f1 = 0.0
        for j, (input_ids, attention_mask, labels) in tqdm(enumerate(dataloader), 'iter dataloader'):
            input_ids, attention_mask, labels = input_ids.to(device), attention_mask.to(device), labels.to(device)
            model.train()
            pred = model(input_ids, attention_mask)
            loss = criterion(pred, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()  # float, not tensor: no graph retained
            total_item += len(input_ids)
            pred_list += pred.argmax(dim=1).tolist()
            label_list += labels.tolist()
            if (j + 1) % m_conf.log_batch == 0:
                # NOTE: total_item counts samples while loss is a per-batch
                # mean, so "avg_loss" is only a relative progress indicator.
                print(f'epoch: {i + 1}, iter: {j + 1}, avg_loss: {total_loss / total_item}')
                f1 = f1_score(label_list, pred_list, average='micro')
                print(f'f1 on train = {f1:.4f}')
                batch_dev_final_f1 = validata_on_dev(model, val_dataloader)
                train_f1.append(f1)
                dev_f1.append(batch_dev_final_f1)
                x_label.append(f'{i + 1}-{j + 1}')

        pretty_draw(train_f1, dev_f1, x_label, m_conf.f1_img_path[:-4] + f'{i + 1}.png')
        # Keep only the checkpoint with the best dev F1 seen so far.
        if batch_dev_final_f1 > epoch_dev_max_f1:
            epoch_dev_max_f1 = batch_dev_final_f1
            ensure_pdir_exist(m_conf.model_path)
            torch.save(model.state_dict(), m_conf.model_path)
            print(f'更新验证集最大f1={epoch_dev_max_f1}，保存模型')
    # Observed result: epoch 4 reached best dev f1 = 0.7568 (model saved).


# Lazily-initialized inference model singleton, populated by get_model().
_model = None


def get_model(fix_bert_weights=True):
    """Return the cached inference model, loading the checkpoint on first use.

    Note: fix_bert_weights only takes effect on the first call; later
    calls return the already-built singleton unchanged.

    Fixes vs. the original: map_location lets a GPU-trained checkpoint
    load on a CPU-only machine, and the singleton is put in eval mode
    once so dropout is disabled for all inference callers.
    """
    global _model
    if _model is None:
        _model = MyModel(fix_bert_weights).to(device)
        _model.load_state_dict(torch.load(m_conf.model_path, map_location=device))
        _model.eval()
    return _model


def validata(fix_bert_weights=True):
    """Evaluate the saved model on the test set and print the metrics."""
    model = get_model(fix_bert_weights)
    model.eval()
    dataloader = get_dataloader(MyDataset(conf.test_path))

    pred_list = []
    label_list = []
    with torch.no_grad():
        for input_ids, attention_mask, labels in tqdm(dataloader, 'test...'):
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            labels = labels.to(device)
            logits = model(input_ids, attention_mask)
            pred_list.extend(logits.argmax(dim=1).tolist())
            label_list.extend(labels.tolist())

    # Accuracy, then micro-averaged precision / recall / F1.
    acc_score = accuracy_score(label_list, pred_list)
    print(f'准确率：{acc_score}')
    pre_score = precision_score(label_list, pred_list, average='micro')
    print(f'精确率：{pre_score}')
    rec_score = recall_score(label_list, pred_list, average='micro')
    print(f'召回率：{rec_score}')
    f1 = f1_score(label_list, pred_list, average='micro')
    print(f'F1-score：{f1}')
    # Per-class breakdown with human-readable label names.
    target_names = [id2label[i] for i in range(len(id2label))]
    c_report = classification_report(label_list, pred_list, target_names=target_names)
    print(f'分类报告：\n{c_report}')
    """
准确率：0.7675
精确率：0.7675
召回率：0.7675
F1-score：0.7675
分类报告：
              precision    recall  f1-score   support

          体育       1.00      0.92      0.96      1000
          娱乐       0.94      0.82      0.87      1000
          家居       0.76      0.19      0.30      1000
          房产       0.46      0.86      0.60      1000
          教育       0.53      0.87      0.66      1000
          时尚       0.88      0.95      0.91      1000
          时政       0.95      0.72      0.82      1000
          游戏       0.90      0.92      0.91      1000
          科技       0.92      0.83      0.87      1000
          财经       0.91      0.59      0.71      1000

    accuracy                           0.77     10000
   macro avg       0.82      0.77      0.76     10000
weighted avg       0.82      0.77      0.76     10000

    """


def predict(text):
    """Predict the class-label string for a single raw text.

    Fix vs. the original: the model is explicitly switched to eval mode.
    A freshly constructed nn.Module is in train mode, so without this
    BERT's dropout stayed active and predictions were stochastic.
    """
    model = get_model()
    model.eval()
    # Reuse the training collate path; the dummy label -1 is discarded.
    input_ids, attention_mask, _ = collate_fn([(text, -1)])
    input_ids, attention_mask = input_ids.to(device), attention_mask.to(device)
    with torch.no_grad():
        return id2label[model(input_ids, attention_mask).argmax(dim=1).item()]


def _test_predict():
    """Run predict() on the configured sample texts and report correctness."""
    for sample, expected in zip(conf.test_texts, conf.test_results):
        got = predict(sample.strip())
        print(f'predict text={sample[:100]}\nresult={got}\tcorrect={got == expected}')


def _test_draw():
    """Smoke-test pretty_draw with synthetic f1 curves and epoch-batch labels."""
    train_f1 = [0.52, 0.61, 0.68, 0.77, 0.81, 0.85, 0.89, 0.91, 0.92, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99, 1.0]
    dev_f1 = [0.62, 0.68, 0.77, 0.84, 0.92, 0.94, 0.96, 0.95, 0.96, 0.94, 0.91, 0.90, 0.92, 0.88, 0.87, 0.90]
    max_batch = 4
    # Labels run "epoch-batch": 1-1, 1-2, 1-3, 2-1, ... (batch wraps once
    # it reaches max_batch), one label per data point.
    epoch, batch = 1, 1
    x_label = []
    for _ in range(len(train_f1)):
        x_label.append(f'{epoch}-{batch}')
        batch += 1
        if batch >= max_batch:
            epoch, batch = epoch + 1, 1
    pretty_draw(train_f1, dev_f1, x_label)


if __name__ == '__main__':
    # Manual workflow switch: uncomment the step to run (draw smoke test,
    # training with frozen or trainable BERT, or test-set evaluation).
    # Currently runs prediction over the configured sample texts.
    # _test_draw()
    # train()
    # train(fix_bert_weights=False)
    # validata()
    _test_predict()
    pass
