
import os

# Point the Hugging Face cache directories at local project folders.
# NOTE: these must be set BEFORE `transformers` is imported, or they are ignored.
# (Mirror URLs and the legacy TRANSFORMERS_CACHE variable were tried earlier
# and are intentionally left disabled.)
os.environ.update({
    "HF_HOME": "/Users/luckincoffee/Desktop/project/python/python-demo/home_cache",
    "HF_DATASETS_CACHE": "/Users/luckincoffee/Desktop/project/python/python-demo/data_cache",
})

from transformers import pipeline, AutoModel, AutoModelForSequenceClassification, AutoTokenizer

# print("开始执行")
# # 制定任务类型，再制定模型，创建基于执行模型的pipeline- 使用中文的
# pipe=pipeline("text-classification","uer/roberta-base-chinese-extractive-qa",force_download=False)
# print(pipe("你是个大坏蛋"))
# print("执行结束")

# txt="你好,你真是一个大坏蛋,我不喜欢你"
# print("开始执行")
# model=AutoModelForSequenceClassification.from_pretrained(
#     "uer/roberta-base-chinese-extractive-qa")
# tokenizer=AutoTokenizer.from_pretrained("uer/roberta-base-chinese-extractive-qa")
# print(tokenizer.tokenize(txt))
#
# pipe=pipeline("text-classification",model=model,tokenizer=tokenizer,device=0)
# print(pipe(txt))

# print(pipe.model.device)


# 设置qa_pipe
#
# qa_pipe=pipeline("question-answering",model="uer/roberta-base-chinese-extractive-qa",
#                  tokenizer="uer/roberta-base-chinese-extractive-qa")
#
# print(qa_pipe({"question":"你是一个大坏蛋吗","context":"你是一个大坏蛋"}))


# 零样本目标检测
# detector= pipeline(model='google/owlvit-base-patch32',task='zero-shot-object-detection')
#
# from PIL import Image
#
# image=Image.open("dog.jpeg")
# result=detector(image,candidate_labels=['dog','cat','person'])
# print(result)
# from PIL import ImageDraw
# draw = ImageDraw.Draw(image)
#
# for pre in result:
#     print(pre)
#     box, label, score=pre['box'],pre['label'],pre['score']
#     xmin, ymin, xmax, ymax = box.values()
#     draw.rectangle((xmin, ymin, xmax, ymax), outline='red')
#     draw.text((xmin, ymin), f'{label}: {score:.2f}', fill='red')
#
#
# image.show()


# Chinese sentiment classification, step by step (no pipeline wrapper).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

MODEL_NAME = "uer/roberta-base-finetuned-dianping-chinese"

# Step 1: initialize the tokenizer.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# Step 2: initialize the model (from_pretrained returns it in eval mode).
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
# Step 3: preprocess the input text into model tensors.
input_text = "我觉得不太行！"
inputs = tokenizer(input_text, return_tensors="pt")
print(inputs)
# Step 4: run the forward pass; no_grad avoids building the autograd graph
# during pure inference (saves memory and time).
with torch.no_grad():
    res = model(**inputs).logits
print(res)
# Step 5: postprocess — argmax over the class dimension (dim=-1 keeps this
# correct even for batch sizes > 1; the original flattened the whole tensor).
pred = torch.argmax(torch.softmax(res, dim=-1), dim=-1)
print(pred)
print(model.config.id2label)
# id2label maps the predicted class index to a human-readable label.
result = model.config.id2label.get(pred.item())
print(result)