import joblib
import torch
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score, classification_report
from torch import nn
from torch.optim import AdamW
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from transformers import BertTokenizer, BertModel, BertConfig

from common import get_id2label
from config import conf


def load_data(path):
    """Read tab-separated (text, label) samples from *path*.

    Each non-empty line is expected to be ``text<TAB>label_id``.
    Returns a list of ``(text, int_label)`` tuples.
    """
    samples = []
    with open(path, 'r', encoding='utf-8') as f:
        for raw in tqdm(f, f'load data from {path}'):
            raw = raw.strip()
            if not raw:
                continue  # skip blank lines
            fields = raw.split('\t')
            samples.append((fields[0], int(fields[1])))
    print(f'数据长度：{len(samples)}')
    print(f'部分数据：{samples[:5]}')
    return samples


class MyDataset(Dataset):
    """Torch Dataset over (text, label) pairs loaded from a TSV file."""

    def __init__(self, path):
        # Materialise all samples up-front and cache the count.
        self.data = load_data(path)
        self.len = len(self.data)

    def __len__(self):
        return self.len

    def __getitem__(self, index):
        text, label = self.data[index]
        return text, label


def _test_dataset():
    """Smoke-test: build the training dataset and print its repr."""
    dataset = MyDataset(conf.train_path)
    print(dataset)


device = conf.bert_config.device
# Load config, tokenizer and encoder from the same pretrained checkpoint.
# NOTE: ``BertConfig(path)`` would pass the path as the first positional
# argument (``vocab_size``) and return a *default* config, so attributes
# such as ``hidden_size`` would not match the checkpoint; from_pretrained
# actually reads the checkpoint's config file.
bert_config = BertConfig.from_pretrained(conf.bert_config.pretrained_model_path)
bert_tokenizer = BertTokenizer.from_pretrained(conf.bert_config.pretrained_model_path)
bert_model = BertModel.from_pretrained(conf.bert_config.pretrained_model_path)


def collate_fn(batch):
    """Tokenize a batch of (text, label) pairs into padded tensors.

    Returns ``(input_ids, attention_mask, labels)`` ready for the model.
    """
    texts, labels = zip(*batch)
    encoded = bert_tokenizer.batch_encode_plus(
        list(texts),
        return_tensors='pt',
        # max_length=conf.bert_config.padding_size,
        padding=True
    )
    return encoded['input_ids'], encoded['attention_mask'], torch.LongTensor(list(labels))


def get_dataloader(dataset):
    """Wrap *dataset* in a shuffled DataLoader using the shared collate_fn."""
    loader = DataLoader(
        dataset=dataset,
        batch_size=conf.bert_config.batch_size,
        shuffle=True,
        collate_fn=collate_fn,
    )
    return loader


class MyModel(nn.Module):
    """Shared BERT encoder plus a linear classification head.

    The logits are computed from the hidden state at position 0
    (the [CLS] token) of the encoder output.
    """

    def __init__(self, fix_bert_weights=True):
        super().__init__()
        # When True, the encoder forward runs under no_grad (frozen BERT).
        self.fix_bert_weights = fix_bert_weights
        self.bert = bert_model
        # One output logit per label id.
        self.fc = nn.Linear(bert_config.hidden_size, len(get_id2label()))

    def forward(self, input_ids, attention_mask):
        if self.fix_bert_weights:
            # Frozen encoder: avoid building the autograd graph through BERT.
            with torch.no_grad():
                encoded = self.bert(input_ids, attention_mask)
        else:
            encoded = self.bert(input_ids, attention_mask)
        cls_state = encoded.last_hidden_state[:, 0]
        return self.fc(cls_state)


def _test_model():
    """Smoke-test: run one batch through an untrained model."""
    dataloader = get_dataloader(MyDataset(conf.train_path))
    model = MyModel()
    for input_ids, attention_mask, labels in dataloader:
        print(input_ids, attention_mask, labels)
        logits = model(input_ids, attention_mask)
        print(logits)
        break  # one batch is enough for a smoke test


def validata_on_dev(model, dev_dataloader):
    """Compute and print micro-F1 of *model* on the dev set.

    Switches the model to eval mode (disabling dropout) for the duration of
    the evaluation and restores train mode afterwards if it was active.
    """
    was_training = model.training
    model.eval()  # dropout must be off for a deterministic evaluation
    pred_list = []
    label_list = []
    with torch.no_grad():  # hoisted: no gradients needed anywhere here
        for input_ids, attention_mask, labels in dev_dataloader:
            input_ids, attention_mask, labels = input_ids.to(device), attention_mask.to(device), labels.to(device)
            pred = model(input_ids, attention_mask)
            pred_list += pred.argmax(dim=1).tolist()
            label_list += labels.tolist()
    if was_training:
        model.train()
    f1 = f1_score(label_list, pred_list, average='micro')
    print(f'f1 on dev = {f1:.4f}')
    return f1


def train(fix_bert_weights=True):
    """Train MyModel, keeping the best checkpoint (by dev micro-F1) on disk.

    :param fix_bert_weights: when True only the classification head is
        trained and the shared BERT encoder stays frozen.
    """
    dataloader = get_dataloader(MyDataset(conf.train_path))
    dev_dataloader = get_dataloader(MyDataset(conf.dev_path))
    model = MyModel(fix_bert_weights).to(device)

    # Set requires_grad explicitly in BOTH directions, and do it BEFORE the
    # optimizer is built: bert_model is a shared module-level object, so a
    # previous train(fix_bert_weights=True) call would otherwise leave the
    # encoder permanently frozen for a later fine-tuning run.
    for p in bert_model.parameters():
        p.requires_grad_(not fix_bert_weights)

    optimizer = AdamW(model.parameters(), lr=conf.bert_config.lr)
    criterion = nn.CrossEntropyLoss()

    epoch_dev_max_f1 = 0.0
    for i in range(conf.bert_config.epochs):
        model.train()
        total_loss = 0.0
        total_item = 0
        pred_list = []
        label_list = []
        batch_dev_max_f1 = 0.0
        for j, (input_ids, attention_mask, labels) in tqdm(enumerate(dataloader), f'iter dataloader'):
            input_ids, attention_mask, labels = input_ids.to(device), attention_mask.to(device), labels.to(device)
            pred = model(input_ids, attention_mask)
            loss = criterion(pred, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # .item() detaches the scalar: accumulating the loss *tensor*
            # would keep every batch's autograd graph alive (memory leak).
            # Weight by batch size so total_loss / total_item is a true
            # per-sample average (criterion reduces to the batch mean).
            total_loss += loss.item() * labels.size(0)
            total_item += len(input_ids)
            pred_list += pred.argmax(dim=1).tolist()
            label_list += labels.tolist()
            if (j + 1) % conf.bert_config.log_batch == 0:
                print(f'epoch: {i + 1}, iter: {j + 1}, avg_loss: {total_loss / total_item}')
                f1 = f1_score(label_list, pred_list, average='micro')
                print(f'f1 on train = {f1:.4f}')
                dev_f1 = validata_on_dev(model, dev_dataloader)
                model.train()  # re-enter train mode in case evaluation toggled it
                batch_dev_max_f1 = max(batch_dev_max_f1, dev_f1)

        if batch_dev_max_f1 > epoch_dev_max_f1:
            epoch_dev_max_f1 = batch_dev_max_f1
            joblib.dump(model, conf.bert_config.model_path)
            print(f'更新验证集最大f1={epoch_dev_max_f1}， 保存模型')


def validata():
    """Evaluate the saved best model on the test set and print metrics."""
    model = joblib.load(conf.bert_config.model_path).to(device)
    model.eval()  # disable dropout for a deterministic evaluation
    dataloader = get_dataloader(MyDataset(conf.test_path))

    pred_list = []
    label_list = []
    with torch.no_grad():
        for input_ids, attention_mask, labels in tqdm(dataloader, 'test...'):
            input_ids, attention_mask, labels = input_ids.to(device), attention_mask.to(device), labels.to(device)
            pred = model(input_ids, attention_mask)
            pred_list += pred.argmax(dim=1).tolist()
            label_list += labels.tolist()

    acc_score = accuracy_score(label_list, pred_list)
    print(f'准确率：{acc_score}')
    pre_score = precision_score(label_list, pred_list, average='micro')
    print(f'精确率：{pre_score}')
    rec_score = recall_score(label_list, pred_list, average='micro')
    print(f'召回率：{rec_score}')
    f1 = f1_score(label_list, pred_list, average='micro')
    print(f'F1-score：{f1}')
    # Human-readable label names go in target_names; the original code
    # overwrote label_list with the names, making classification_report
    # compare label *names* against integer predictions.
    id2label = get_id2label()
    target_names = [id2label[i] for i in range(len(id2label))]
    c_report = classification_report(label_list, pred_list, target_names=target_names)
    print(f'分类报告：\n{c_report}')


# Lazily-initialised module-level cache for the trained model.
_model = None


def get_model():
    """Load the trained model from disk once and cache it for reuse."""
    global _model
    if _model is not None:
        return _model
    _model = joblib.load(conf.bert_config.model_path)
    return _model


def predict(text):
    """Classify a single *text* and return its human-readable label."""
    model = get_model()
    # Dummy label -1: collate_fn expects (text, label) pairs but the label
    # is discarded here.
    input_ids, attention_mask, _ = collate_fn([(text, -1)])
    with torch.no_grad():
        logits = model(input_ids, attention_mask)
    label_id = logits.argmax(dim=1).item()
    return get_id2label()[label_id]


def _test_predict():
    """Smoke-test predict() on a few sample headlines."""
    samples = (
        '名师详解考研复试英语听力备考策略',
        '四六级考前阅读冲刺：如何发挥正常水平',
        '广东雷州男子嗜水如命日饮百斤',
    )
    for text in samples:
        print(f'text={text} result={predict(text)}')


if __name__ == '__main__':
    # Uncomment the desired stage; each call below is an independent entry point.
    # _test_dataset()
    # _test_model()
    # Stage 1: train only the classification head (BERT frozen),
    # then Stage 2: fine-tune the whole network.
    train()
    train(fix_bert_weights=False)
    # validata()
    # _test_predict()
    pass
