import os

import torch
import torch.utils.data
from datasets import load_from_disk, load_dataset
from transformers import AutoTokenizer, AutoModel

import dltools

# Load the tokenizer for the Chinese "hfl/rbt6" checkpoint (cached locally).
# HF_ENDPOINT=https://hf-mirror.com  (set this env var to download via the HF mirror)
tokenizer = AutoTokenizer.from_pretrained("hfl/rbt6", cache_dir="./MNIST/cache", force_download=False)
print(f"tokenizer:{tokenizer}")
# Sanity check: batch-encode two pre-split (character-level) sentences.
res = tokenizer.batch_encode_plus(
    [list("海钓比赛地点在厦门与金门之间的海域。"), list("这座依山傍水的博物馆由国内一流的设计师主持设计，整个建筑群精美而恢弘。")],
    truncation=True,
    padding=True,
    return_tensors="pt",
    is_split_into_words=True
)
print(f"res:{res}")
# People's Daily NER dataset, previously saved to disk with datasets.save_to_disk.
dataset = load_from_disk("./MNIST/peoples_daily_ner")
print(f"dataset:{dataset}")


class NerDataset(torch.utils.data.Dataset):
    """One split of the People's Daily NER dataset.

    Sentences longer than 510 tokens are dropped so that, together with the
    [CLS]/[SEP] specials added by the tokenizer, every example fits in the
    encoder's 512-position window.
    """

    def __init__(self, split):
        raw = load_from_disk("./MNIST/peoples_daily_ner")[split]
        # Reserve two positions for the start/end special tokens.
        self.dataset = raw.filter(lambda sample: len(sample["tokens"]) <= 512 - 2)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        return self.dataset[index]


def collate_fn(data):
    """Collate raw samples into model inputs plus padded label tensors.

    Tokenizes the pre-split character lists as a batch, then pads every label
    sequence to the encoded length.  Label 7 is used for the special/pad
    positions ([CLS], [SEP] and trailing padding).
    """
    token_lists = [sample["tokens"] for sample in data]
    tag_lists = [sample["ner_tags"] for sample in data]
    inputs = tokenizer.batch_encode_plus(
        token_lists,
        padding=True,
        return_tensors="pt",
        is_split_into_words=True
    )
    seq_len = inputs["input_ids"].shape[1]
    # One leading 7 for [CLS]; trailing 7s cover [SEP] and padding.
    padded = [[7] + tags + [7] * (seq_len - len(tags) - 1) for tags in tag_lists]
    return inputs, torch.tensor(padded)


# Training data loader; collate_fn pads the labels to the tokenized length.
train_loader = torch.utils.data.DataLoader(
    dataset=NerDataset("train"),
    batch_size=16,
    shuffle=True,
    collate_fn=collate_fn,
    drop_last=True
)

# Inspect a single batch to verify tokenization and label alignment.
for inputs, labels in train_loader:
    print(f"inputs:{inputs}")
    print(f"inputs['input_ids'][0]:{inputs['input_ids'][0]}")
    print(f"inputs['input_ids'][0]:{tokenizer.decode(inputs['input_ids'][0])}")
    print(f"inputs['token_type_ids'][0]:{inputs['token_type_ids'][0]}")
    print(f"labels[0]:{labels[0]}")
    break

# Pick a device (dltools.try_gpu presumably falls back to CPU — confirm) and
# load the encoder that serves as a frozen feature extractor by default.
device = dltools.try_gpu()
pretrained = AutoModel.from_pretrained("hfl/rbt6", cache_dir="./MNIST/cache", force_download=False).to(device)
# Prints the encoder parameter count in units of 10k.
print(f"参数量：{sum(i.numel() for i in pretrained.parameters())/10000:.3f}")


class NerModel(torch.nn.Module):
    """GRU + linear tagging head on top of an (optionally frozen) BERT encoder.

    With ``tuneing=False`` the module-level ``pretrained`` encoder is used as a
    frozen feature extractor; with ``tuneing=True`` the encoder is owned by the
    model (``self.pretrained``) and trained together with the head.
    """

    def __init__(self):
        super().__init__()
        self.tuneing = False
        self.pretrained = None
        self.rnn = torch.nn.GRU(768, 768, batch_first=True)
        # 8 classes = 7 NER tags + label 7 for pad/special positions.
        self.fc = torch.nn.Linear(768, 8)

    def forward(self, input_ids, token_type_ids, attention_mask):
        """Return per-token logits of shape (batch, seq_len, 8)."""
        encoder = self.pretrained if self.tuneing else pretrained
        # BUG FIX: pass the encoder inputs by keyword.  BertModel.forward's
        # positional order is (input_ids, attention_mask, token_type_ids), so
        # the old positional call swapped token_type_ids and attention_mask.
        out = encoder(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
        ).last_hidden_state
        out, _ = self.rnn(out)
        # BUG FIX: return raw logits.  The previous .softmax(dim=-1) combined
        # with CrossEntropyLoss (which applies log_softmax itself) double-
        # normalized the outputs and weakened gradients; argmax-based metrics
        # are unaffected by removing it.
        return self.fc(out)

    def fine_tuneing(self, tuneing):
        """Toggle between frozen-feature mode (False) and full fine-tuning (True)."""
        self.tuneing = tuneing
        if tuneing:
            for p in pretrained.parameters():
                p.requires_grad_(True)
            pretrained.train()
            self.pretrained = pretrained
        else:
            for p in pretrained.parameters():
                p.requires_grad_(False)
            pretrained.eval()
            self.pretrained = None


model = NerModel()
model.to(device)

# Smoke test: push one batch through the untrained model to check output shape.
for inputs, labels in train_loader:
    input_ids, token_type_ids, attention_mask = inputs["input_ids"].to(device), inputs["token_type_ids"].to(device), inputs["attention_mask"].to(device)
    out = model(input_ids, token_type_ids, attention_mask)
    print(f"out.shape:{out.shape}")
    break



def reshape_and_remove_pad(out, labels, attention_mask):
    """Flatten batch/sequence dims and drop padded positions.

    Args:
        out: (batch, seq_len, num_classes) model outputs.
        labels: (batch, seq_len) integer labels.
        attention_mask: (batch, seq_len) mask; 1 marks real positions.

    Returns:
        (kept_out, kept_labels): (N, num_classes) and (N,) tensors containing
        only the positions where attention_mask == 1.
    """
    # Generalized: infer the class count from the input instead of the
    # hard-coded 8, so the helper works for any tagging head width.
    num_classes = out.shape[-1]
    out = out.reshape(-1, num_classes)
    labels = labels.reshape(-1)
    keep = attention_mask.reshape(-1) == 1
    return out[keep], labels[keep]


# Compute correct/total counts for accuracy reporting.
def get_correct_and_total_count(labels, out):
    """Return (correct, total, correct_content, total_content).

    The first pair covers every position; the second pair is restricted to
    positions whose label is non-zero (i.e. not the "O" tag), which gives a
    more meaningful accuracy for the entity content.
    """
    preds = out.argmax(dim=-1)

    # Accuracy over all positions.
    correct = (preds == labels).sum().item()
    total = len(labels)

    # Accuracy over non-"O" positions only.
    content = labels != 0
    content_preds = preds[content]
    content_labels = labels[content]
    correct_content = (content_preds == content_labels).sum().item()
    total_content = len(content_labels)

    return correct, total, correct_content, total_content


# Sanity check of the metric helper: random logits vs. all-ones labels.
res = get_correct_and_total_count(torch.ones(16), torch.randn(16, 8))
print(f"res:{res}")


def train(epochs, model: torch.nn.Module):
    """Train `model` on `train_loader` for `epochs` epochs, then save it.

    Uses a small learning rate (2e-5) when the encoder is being fine-tuned and
    a larger one (5e-4) when only the head is trained.  Prints loss/accuracy
    every 50 steps and saves the whole model to ./modelSave/ner.model.
    """
    lr = 2e-5 if model.tuneing else 5e-4
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
    loss_fn = torch.nn.CrossEntropyLoss()
    model.to(device)
    model.train()
    for epoch in range(epochs):
        for i, (inputs, labels) in enumerate(train_loader):
            input_ids = inputs["input_ids"].to(device)
            token_type_ids = inputs["token_type_ids"].to(device)
            attention_mask = inputs["attention_mask"].to(device)
            labels = labels.to(device)
            # optimizer.zero_grad() already clears every parameter handed to
            # the optimizer; the previous extra model.zero_grad() was redundant.
            optimizer.zero_grad()
            out = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
            # Drop padded positions before computing the loss.
            out, labels = reshape_and_remove_pad(out, labels, attention_mask)
            loss = loss_fn(out, labels)
            loss.backward()
            optimizer.step()

            if i % 50 == 0:
                correct, total, correct_content, total_content = get_correct_and_total_count(labels, out)
                accuracy = correct / total
                accuracy_content = correct_content / total_content
                print(f"{epoch} {i} loss:{loss:.3f}, accuracy:{accuracy:.3f}, accuracy_content:{accuracy_content:.3f}")
    # Make sure the target directory exists before saving.
    os.makedirs("./modelSave", exist_ok=True)
    torch.save(model, "./modelSave/ner.model")

# Compare trainable parameter counts: encoder frozen vs. fully fine-tuned.
model.fine_tuneing(False)
print(f"Few-shot模式参数量：{sum(p.numel() for p in model.parameters())}")
model.fine_tuneing(True)
print(f"微调模式参数量：{sum(p.numel() for p in model.parameters())}")
#model.fine_tuneing(False)


def _test():
    """Evaluate the saved model on the validation split and print accuracies.

    Also decodes a few examples from the last batch, printing the original
    sentence and the gold vs. predicted tag-id strings.
    """
    # NOTE(review): torch.load unpickles arbitrary objects — only load files
    # you produced yourself.
    model_load = torch.load("./modelSave/ner.model")
    model_load.to(device)
    model_load.eval()
    test_loader = torch.utils.data.DataLoader(
        dataset=NerDataset("validation"),
        batch_size=128,
        collate_fn=collate_fn,
        shuffle=False,
        drop_last=False
    )
    all_correct = 0
    all_total = 0
    all_correct_content = 0
    all_total_content = 0
    loader_len = len(test_loader)
    for i, (inputs, labels) in enumerate(test_loader):
        input_ids = inputs["input_ids"].to(device)
        token_type_ids = inputs["token_type_ids"].to(device)
        attention_mask = inputs["attention_mask"].to(device)
        labels = labels.to(device)
        with torch.no_grad():
            # BUG FIX: run inference with the model we just loaded; the old
            # code called the global `model` and never used `model_load`.
            out = model_load(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
        if (i + 1) == loader_len:
            # Show a few decoded examples from the last batch; clamp in case
            # the final (non-dropped) batch has fewer than 5 samples.
            for n in range(min(5, input_ids.shape[0])):
                # Strip padded positions.  All tensors indexed here live on
                # `device`, so the boolean mask is device-consistent.
                select = attention_mask[n] == 1
                input_id = input_ids[n, select]
                print(f"out.shape:{out.shape}")
                out1 = out[n, select].argmax(dim=-1)
                label = labels[n, select]
                print(f"原句子:{tokenizer.decode(input_id).replace(' ', '')}")
                labels_str = ""
                out_str = ""
                # Tag ids: ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"],
                # plus 7 for pad/special positions.
                for j in range(len(label)):
                    labels_str += str(label[j].item())
                    out_str += str(out1[j].item())
                print(labels_str)
                print(out_str)
        out, labels = reshape_and_remove_pad(out, labels, attention_mask)
        correct, total, correct_content, total_content = get_correct_and_total_count(labels, out)
        all_correct += correct
        all_total += total
        all_correct_content += correct_content
        all_total_content += total_content

    print(f"accuracy:{all_correct/all_total:.3f}, accuracy_content:{all_correct_content/all_total_content:.3f}")



# Train for one epoch, then evaluate the saved model on the validation split.
train(1, model)
_test()


















