import warnings
import torch
from torch import nn

warnings.filterwarnings("ignore")
from transformers import pipeline
from transformers import AutoTokenizer, AutoModel  # 自动判断
from transformers import AutoModelForSequenceClassification

"""
    设置TRANSFORMERS_CACHE换将变量可以修改huggingface的存储地址
"""

# Sample sentences shared as input by the demo functions below.
# NOTE: the last entry is intentionally duplicated, giving a batch of 5.
text_raw = [
    "I've been waiting for a HuggingFace course my whole life.",
    "I hate this so much!",
    "I love this so much!",
    "I am so bored",
    "I am so bored",
]


def test_classifier():
    """Run the default sentiment-analysis pipeline over the sample texts."""
    sentiment = pipeline("sentiment-analysis")
    predictions = sentiment(text_raw)
    print(predictions)


def test_tokenizer():
    """Tokenize a small batch, then round-trip a known id sequence back to text."""
    bert_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    sample_sentences = [
        "I've been waiting for a HuggingFace course my whole life.",
        "I hate this so much!"
    ]
    encoded = bert_tokenizer(sample_sentences, padding=True, truncation=True, return_tensors="pt")
    print(encoded)
    # Decode a hard-coded id sequence to show the inverse mapping.
    token_ids = [101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172,
                 2607, 2026, 2878, 2166, 1012, 102]
    decoded_text = bert_tokenizer.decode(token_ids)
    print(decoded_text)


def test_model():
    """Load a bare encoder (no task head) and show its hidden-state shape."""
    checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
    encoder = AutoModel.from_pretrained(checkpoint)
    tok = AutoTokenizer.from_pretrained(checkpoint)
    batch = tok(text_raw, padding=True, truncation=True, return_tensors="pt")
    # last_hidden_state is (batch, seq_len, hidden_dim) for this encoder.
    print(encoder(**batch).last_hidden_state.shape)
    print(encoder)


def test_model_with_output_head():
    """Classify the sample texts with a sequence-classification head.

    Fix: forward the full tokenizer encoding (input_ids AND attention_mask)
    to the model.  The original passed only ``inputs.input_ids``, so the
    padded positions of shorter sequences were attended to and skewed their
    logits — exactly the pitfall test_padding_and_attentionmask demonstrates.
    """
    model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
    inputs = tokenizer(text_raw, padding=True, truncation=True, return_tensors="pt")
    # **inputs forwards attention_mask so padding tokens are masked out.
    outputs = model(**inputs)
    print(outputs.logits.shape)
    print(model)
    # Convert raw logits to class probabilities.
    outputs_logits = nn.functional.softmax(outputs.logits, dim=-1)
    print(outputs_logits)


def test_id2label():
    """Print the model's id2label mapping (it can be customized via config).

    Fix: pass the full encoding (with attention_mask) to the model — the
    original forwarded only ``.input_ids`` from a padded batch, so padding
    tokens influenced the logits.
    """
    model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
    print(model.config.id2label)
    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
    inputs = tokenizer(text_raw, padding=True, truncation=True, return_tensors="pt")
    print(model(**inputs))


# Module-level model/tokenizer shared by the padding demos below.
# NOTE(review): loading happens at import time as a side effect — every run of
# this file (even for unrelated functions) loads these weights.
model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")


def test_padding_and_attentionmask():
    """Show that padding without an attention mask changes the logits,
    and that supplying the mask restores the unpadded results."""
    single_long = [[200, 200, 200]]
    single_short = [[200, 200]]
    padded_batch = [
        [200, 200, 200],
        [200, 200, tokenizer.pad_token_id],
    ]
    # Without a mask, the padded row's logits differ from the standalone run.
    print(model(torch.tensor(single_long)).logits)
    print(model(torch.tensor(single_short)).logits)
    print(model(torch.tensor(padded_batch)).logits)
    print("*" * 100)
    padded_batch = [
        [200, 200, 200],
        [200, 200, tokenizer.pad_token_id],
    ]
    # 1 = attend to this token, 0 = ignore (padding).
    mask = [
        [1, 1, 1],
        [1, 1, 0],
    ]
    masked_outputs = model(torch.tensor(padded_batch), attention_mask=torch.tensor(mask))
    print(masked_outputs.logits)


def test_different_padding_method():
    """Walk through the tokenizer's padding and truncation options."""
    sample = ["I've been waiting for a this course my whole life.", "So have I!", "I played basketball yesterday."]
    # Pad to the longest sequence in the batch.
    print(tokenizer(sample, padding="longest"))
    # Pad to the model's maximum length (512 by default for BERT).
    print(tokenizer(sample, padding="max_length"))
    # Pad to an explicit length.
    print(tokenizer(sample, padding="max_length", max_length=8))
    # Truncate anything longer than max_length.
    print(tokenizer(sample, max_length=10, truncation=True))
    # Best practice: have the tokenizer return tensors directly.
    print(tokenizer(sample, padding=True, return_tensors="pt"))


if __name__ == "__main__":
    """"""
    # test_classifier()
    # test_tokenizer()
    # test_model()
    # test_model_with_output_head()
    # test_id2label()
    # test_padding_and_attentionmask()
    test_different_padding_method()
