import torch.utils.data
from transformers import BertTokenizer, BertModel
from datasets import load_from_disk, load_dataset

from dltools import dltools

# NOTE: set HF_ENDPOINT=https://hf-mirror.com in the environment to download via a mirror.
tokenizer = BertTokenizer.from_pretrained(
    pretrained_model_name_or_path="bert-base-chinese",
    cache_dir="./MNIST/cache",
    force_download=False
)
# Sample Chinese sentences reused by the tokenizer demos below.
sents = [
    "选择珠江花园的原因就是方便。",
    "笔记本的键盘确实爽。",
    "房间太小，其他的都一般。",
    "今天才知道这本书有第六卷，真有点郁闷。",
    "机器背面似乎被撕了张什么标签，残胶还在。"
]
# Basic encoding: join sents[0] and sents[1] into a single input-id sequence.
out = tokenizer.encode(
    text=sents[0],
    text_pair=sents[1],
    truncation=True,  # truncate when the pair exceeds max_length
    padding="max_length",  # pad up to max_length; 'longest'/True would pad to the longest sample instead
    add_special_tokens=True,  # insert [CLS]/[SEP] markers
    max_length=30,
    return_tensors=None  # tf: tf.constant; pt: torch.Tensor; np: np.ndarray; None: plain Python list
)
print(f"out:{out}")
print(f"tokenizer.decode(out):{tokenizer.decode(out)}")

# Enhanced encoding: encode_plus also returns the auxiliary fields requested below.
out_plus = tokenizer.encode_plus(
    text=sents[0],
    text_pair=sents[1],
    truncation=True,
    padding="max_length",
    add_special_tokens=True,
    max_length=30,
    return_tensors=None,
    return_token_type_ids=True, # segment ids: 0 for the first sentence, 1 for the second
    return_attention_mask=True,  # 1 for real tokens, 0 for padding
    return_special_tokens_mask=True,  # 1 where [CLS]/[SEP]/[PAD] were inserted
    return_length=True
)
# Dump every field returned by encode_plus.
for k, v in out_plus.items():
    print(f"{k}:")
    print(f"{v}")

# Batched encoding: two sentence pairs tokenized in a single call.
batch_out_plus =tokenizer.batch_encode_plus(
    batch_text_or_text_pairs=[(sents[0], sents[1]), (sents[2], sents[3])],
    truncation=True,
    padding="max_length",
    add_special_tokens=True,
    max_length=30,
    return_tensors=None,
    return_token_type_ids=True,  # segment ids: 0 for the first sentence, 1 for the second
    return_attention_mask=True,
    return_special_tokens_mask=True,
    return_length=True
)
print("---------- batch_out_plus")
for k, v in batch_out_plus.items():
    print(f"{k}:")
    print(f"{v}")
vocab = tokenizer.get_vocab()
print(f"len(vocab):{len(vocab)}")
# Extend the vocabulary: two plain tokens plus a new [EOS] special token.
tokenizer.add_tokens(new_tokens=["你好", "大模型"])
tokenizer.add_special_tokens({"eos_token":"[EOS]"})
# get_vocab() returns a snapshot, so re-fetch it to see the additions.
vocab = tokenizer.get_vocab()
print(f"len(vocab):{len(vocab)}")
print(f"vocab.get(\"大模型\"):{vocab.get('大模型')}")
# The newly added tokens now encode as single ids instead of per-character pieces.
out = tokenizer.encode(
    text="你好！",
    text_pair="大模型。",
    truncation=True,
    padding="max_length",
    add_special_tokens=True,
    max_length=15,
    return_tensors=None
)
print(f"out:{out}")
print(f"tokenizer.decode(out):{tokenizer.decode(out)}")

"""
# HF_ENDPOINT=https://hf-mirror.com
dataset = load_from_disk("./MNIST/ChnSentiCorp")

print(f"dataset:{dataset}")
train_dataset = dataset["train"]
print(f"train_dataset[0]:{train_dataset[0]}")
print(f"train_dataset[\"label\"]:{train_dataset["label"]}")
sorted_dataset = train_dataset.sort("label")
print(f"sorted_dataset[\"label\"]:{sorted_dataset["label"][-10:]}")
shuffled_dataset = sorted_dataset.shuffle()
print(f"shuffled_dataset[\"label\"]:{shuffled_dataset["label"][:10]}")
selected_dataset = shuffled_dataset.select([1,8,9,11])
print(f"selected_dataset:{selected_dataset}")
def f(data):
    return data["text"].startswith("选择")
filtered_dataset = shuffled_dataset.filter(f)
print(f"filtered_dataset[\"text\"]:{filtered_dataset["text"]}")
split_dataset = shuffled_dataset.train_test_split(test_size=0.1)
print(f"split_dataset:{split_dataset}")
shared_dataset = shuffled_dataset.shard(num_shards=4, index=2)
print(f"shared_dataset:{shared_dataset}")
def f2(data):
    data["text"] = "my sentence:" + data["text"]
    return data
mapped_dataset = shuffled_dataset.map(f2)
print(f"mapped_dataset[\"text\"][:5]:{mapped_dataset["text"][:5]}")
shuffled_dataset.set_format(type="torch", columns=["label"])
print(f"shuffled_dataset[0]:{shuffled_dataset[0]}")
shuffled_dataset.to_csv(path_or_buf="./MNIST/ChnSentiCorp/exp.csv")
csv_dataset = load_dataset(path="csv", data_files="./MNIST/ChnSentiCorp/exp.csv")
print(f"csv_dataset[\"train\"][10]:{csv_dataset["train"][10]}")
"""

class HuggingFaceDataset(torch.utils.data.Dataset):
    """Thin torch Dataset wrapper around one split of the on-disk ChnSentiCorp dataset."""

    def __init__(self, split):
        # NOTE: set HF_ENDPOINT=https://hf-mirror.com to download via a mirror.
        self.dataset = load_from_disk("./MNIST/ChnSentiCorp")[split]

    def __len__(self):
        """Number of examples in the wrapped split."""
        return len(self.dataset)

    def __getitem__(self, index):
        """Return the raw example (a dict with 'text' and 'label') at *index*."""
        return self.dataset[index]


dataset_1 = HuggingFaceDataset("train")
print(f"dataset_1[1]:{dataset_1[1]}")
print(f"len(dataset_1):{len(dataset_1)}")
# HF_ENDPOINT=https://hf-mirror.com
tokenizer_1 = BertTokenizer.from_pretrained(
    pretrained_model_name_or_path="bert-base-chinese",
    cache_dir="./MNIST/cache",
    force_download=False
)

# Custom collate_fn that tokenizes raw examples into model-ready tensors.
def collate_fn(data):
    """Turn a batch of {"text", "label"} dicts into
    (input_ids, attention_mask, token_type_ids, labels) tensors."""
    texts = []
    targets = []
    for example in data:
        texts.append(example["text"])
        targets.append(example["label"])
    # Tokenize the whole batch at once; every sample is padded/truncated
    # to the tokenizer's maximum length.
    encoded = tokenizer_1.batch_encode_plus(
        batch_text_or_text_pairs=texts,
        truncation=True,
        padding="max_length",
        return_tensors="pt",
        return_length=True
    )
    labels = torch.LongTensor(targets)
    return encoded["input_ids"], encoded["attention_mask"], encoded["token_type_ids"], labels

# Batch the dataset through the custom collate_fn defined above.
data_loader_1 = torch.utils.data.DataLoader(
    dataset=dataset_1,
    batch_size=16,
    collate_fn=collate_fn,
    shuffle=True
)

# Pull a single batch to sanity-check the collated tensor shapes.
for input_ids, attention_mask, token_type_ids, labels in data_loader_1:
    print(f"input_ids.shape:{input_ids.shape}")
    print(f"attention_mask.shape:{attention_mask.shape}")
    print(f"token_type_ids.shape:{token_type_ids.shape}")
    print(f"labels.shape:{labels.shape}")
    break

class HuggingFaceModel(torch.nn.Module):
    """Frozen BERT encoder plus a trainable linear head for 2-class sentiment classification."""

    def __init__(self):
        super().__init__()
        self.pretrainedModel = BertModel.from_pretrained("bert-base-chinese", cache_dir="./MNIST/cache", force_download=False)
        for param in self.pretrainedModel.parameters():
            # Freeze the pretrained encoder; only the linear head is trained.
            param.requires_grad_(False)
        print(f"len(pretrainedModel.parameters()):{len(list(self.pretrainedModel.parameters()))}")
        # Classification head over the [CLS] hidden state: 768 -> 2 classes.
        self.liner = torch.nn.Linear(768, 2)
        # BUG FIX: removed the stray statement `torch.nn.Sigmoid` — it referenced
        # the class without instantiating or calling it, a no-op.

    def forward(self, input_ids, attention_mask, token_type_ids):
        """Return raw classification logits of shape (batch, 2)."""
        with torch.no_grad():
            # The encoder is frozen, so skip autograd bookkeeping for its pass.
            out = self.pretrainedModel(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # BUG FIX: return logits instead of softmax probabilities. The training
        # loop below feeds this output to nn.CrossEntropyLoss, which applies
        # log_softmax internally — softmax-ing twice flattens gradients.
        # argmax-based accuracy is unaffected since softmax is monotonic.
        return self.liner(out.last_hidden_state[:, 0])

model_1 = HuggingFaceModel()

# Smoke test: run one batch through the model and check the output shape.
for input_ids, attention_mask, token_type_ids, labels in data_loader_1:
    out = model_1(input_ids, attention_mask, token_type_ids)
    print(f"out.shape:{out.shape}")
    break
def accuracy(pred: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    """Return the fraction of positions where `pred` equals `labels`.

    Fixes: annotations used `torch.tensor` (a factory function, not a type),
    and the old subtraction-based comparison `(pred - labels) == 0` raises
    for bool tensors; direct `==` comparison works for every dtype.
    Returns a 0-dim float tensor in [0, 1].
    """
    matches = pred == labels
    return matches.sum() / matches.numel()

device = dltools.try_gpu()
print(f"device:{device}")
model_1 = model_1.to(device)
# Only the linear head has requires_grad=True, so AdamW effectively updates
# just the classification layer.
trainer = torch.optim.AdamW(model_1.parameters(), lr=5e-4)
# NOTE(review): CrossEntropyLoss expects raw logits (it applies log_softmax
# internally) — confirm that the model's forward emits logits, not probabilities.
loss_fn = torch.nn.CrossEntropyLoss()
model_1.train()
for i, (input_ids, attention_mask, token_type_ids, labels) in enumerate(data_loader_1):
    trainer.zero_grad()
    out = model_1(input_ids.to(device), attention_mask.to(device), token_type_ids.to(device))
    labels = labels.to(device)
    loss = loss_fn(out, labels)
    loss.backward()
    trainer.step()
    if (i+1) % 5 == 0:
        # Log running loss and batch accuracy every 5 batches.
        print(f"i:{i} loss:{loss} acc:{accuracy(out.argmax(dim=-1), labels)}")


# Evaluation on the validation split (no shuffling needed at eval time).
data_loader_test = torch.utils.data.DataLoader(
    dataset=HuggingFaceDataset("validation"),
    batch_size=32,
    collate_fn=collate_fn,
    shuffle=False
)
model_1.eval()
all_outs = []
all_labels = []
for i, (input_ids, attention_mask, token_type_ids, labels) in enumerate(data_loader_test):
    labels = labels.to(device)
    with torch.no_grad():
        # No gradients needed at evaluation time.
        out = model_1(input_ids.to(device), attention_mask.to(device), token_type_ids.to(device))
        all_outs.extend(out.argmax(dim=-1))
    all_labels.extend(labels)
# NOTE(review): all_outs/all_labels are lists of 0-dim tensors; torch.tensor(...)
# copies them element-wise — torch.stack would be the canonical way to combine.
print(f"test acc:{accuracy(torch.tensor(all_outs), torch.tensor(all_labels))}")