'''
Author: zhao-leo 18055219130@163.com
Date: 2024-10-22 20:38:03
LastEditTime: 2024-10-22 23:05:36
'''
from transformers import AutoTokenizer, AutoModelForMaskedLM
import torch
from .basic_model import BasicModel

# Set the local model path
from transformers import pipeline

#model_path = "./.cache/models/mdeberta-v3-base-squad2/"
class Squad2(BasicModel):
  """Extractive question answering over a fixed announcement context.

  Loads a local SQuAD2-style model via the transformers ``pipeline`` and
  chains several QA queries: a first question extracts the action, then
  follow-up When/Who/Where questions reuse that extracted answer.
  """

  def __init__(self, model_path: str):
    super().__init__(model_path)

  def main_logic(self) -> str:
    """Run the chained QA queries and return all raw results.

    Returns:
      A string with each pipeline result dict on its own line, in the
      order: initial question, When, Who, Where (four lines, each
      terminated by a newline).
    """
    # Initialize the QA pipeline from the local model path each call.
    qa_model = pipeline("question-answering", model=self.model_path)

    # Seed question and the fixed context it is asked against.
    question = "What will do?"
    context = "Students can log in the information portal - Student work system - Student service - Undergraduate Comprehensive assessment, and start their own comprehensive quality assessment, until Sunday"

    first = qa_model(question=question, context=context)
    answer = first["answer"]

    # Bug fix: the original concatenated "When to" + answer with no space,
    # producing fused questions like "When tolog in ...". A trailing space
    # keeps the follow-up questions well-formed.
    follow_ups = [
        f"When to {answer}?",
        f"Who will {answer}?",
        f"Where to {answer}?",
    ]
    results = [first] + [
        qa_model(question=q, context=context) for q in follow_ups
    ]

    # One result dict per line, newline-terminated (matches original shape).
    return "".join(f"{r}\n" for r in results)
