from pprint import pprint

import torch
from PIL import Image
from transformers import pipeline, QuestionAnsweringPipeline, AutoModelForSequenceClassification, AutoTokenizer
from transformers.pipelines import SUPPORTED_TASKS


# Release any cached GPU memory from earlier runs (no-op when CUDA has
# never been initialized in this process).
torch.cuda.empty_cache()

# Inspect every task type that pipeline() supports.
pprint(SUPPORTED_TASKS.keys())
for task_name, task_info in SUPPORTED_TASKS.items():
	print(task_name, task_info)
print("----------------------------------------1------------------------------------------------")
# Create a pipeline from the task name alone; the default checkpoint for
# "text-classification" is an English sentiment model.
pipe = pipeline("text-classification")
# Fixed typo in the demo input: "vary bad!" -> "very bad!".
print(pipe(["very good!", "very bad!"]))
print("----------------------------------------2------------------------------------------------")
# Specify the task together with an explicit checkpoint to build a pipeline
# around a Chinese sentiment-classification model.
pipe = pipeline(
	"text-classification",
	model="uer/roberta-base-finetuned-dianping-chinese",
)
print(pipe("我觉得不太行！"))
print("-----------------------------------------3-----------------------------------------------")
# Load the model and tokenizer up front, then hand the live instances to
# pipeline() instead of a checkpoint name.
checkpoint_name = "uer/roberta-base-finetuned-dianping-chinese"
model = AutoModelForSequenceClassification.from_pretrained(checkpoint_name)
tokenizer = AutoTokenizer.from_pretrained(checkpoint_name)
pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
print(pipe("我觉得不太行！"))
# By default the model sits on the CPU.
print(pipe.model.device)
print("----------------------------------------4------------------------------------------------")
# Run inference on the GPU when one is available; fall back to the CPU (-1)
# so the script does not crash on CUDA-less machines (the original
# hard-coded device=0 raises when no GPU is present).
device = 0 if torch.cuda.is_available() else -1
pipe = pipeline(
	"text-classification",
	model="uer/roberta-base-finetuned-dianping-chinese",
	device=device,
)
print(pipe.model.device)
print("----------------------------------------5------------------------------------------------")
# Build an extractive question-answering pipeline. See
# QuestionAnsweringPipeline.__call__ in transformers for the full set of
# call-time parameters (question, context, top_k, max_answer_len, ...).
# (Removed the dead bare-expression statement `QuestionAnsweringPipeline`,
# which evaluated the name and discarded it.)
qa_pipe = pipeline("question-answering", model="uer/roberta-base-chinese-extractive-qa")
print(qa_pipe)
print(qa_pipe(question="中国的首都是哪里？", context="中国的首都是北京", max_answer_len=2))
print("----------------------------------------6------------------------------------------------")
# Another pipeline example: zero-shot object detection — the model scores
# arbitrary candidate text labels against the image without task-specific
# fine-tuning.
checkpoint = "google/owlvit-base-patch32"
detector = pipeline(model=checkpoint, task="zero-shot-object-detection")
# The image is read from the local file "1.jpg". Original picnic photo:
# https://unsplash.com/photos/oj0zeY2Ltk4/download?ixid=MnwxMjA3fDB8MXxzZWFyY2h8MTR8fHBpY25pY3xlbnwwfHx8fDE2Nzc0OTE1NDk&force=true&w=640
# (Removed the unused `url` variable that held this link but was never read.)
im = Image.open("1.jpg")
print(im)
predictions = detector(
	im,
	candidate_labels=["hat", "sunglasses", "book"],
)
print(predictions)
print("---------------------------------------7-------------------------------------------------")
# What pipeline() does under the hood, step by step.
# 1. Load the tokenizer.
tokenizer = AutoTokenizer.from_pretrained("uer/roberta-base-finetuned-dianping-chinese")
# 2. Load the model.
model = AutoModelForSequenceClassification.from_pretrained("uer/roberta-base-finetuned-dianping-chinese")
# 3. Preprocess: turn the raw text into model inputs (PyTorch tensors).
input_text = "我觉得不太行！"
inputs = tokenizer(input_text, return_tensors="pt")
print(inputs)
# 4. Forward pass. no_grad() skips building the autograd graph — we only
#    run inference here, so tracking gradients wastes memory.
with torch.no_grad():
	res = model(**inputs)
print(res)
# Convert raw logits into class probabilities.
logits = torch.softmax(res.logits, dim=-1)
print(logits)
# 5. Post-process: pick the most probable class index and map it to its
#    human-readable label via the model config.
#    (Removed the dead bare-expression statement `model.config.id2label`.)
pred = torch.argmax(logits).item()
print(pred)
result = model.config.id2label.get(pred)
print(result)