from transformers import AutoTokenizer, AutoModelForQuestionAnswering, QuestionAnsweringPipeline
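
# Load the pretrained Japanese BERT question-answering model and its tokenizer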
model_name = 'KoichiYasuoka/bert-base-japanese-wikipedia-ud-head'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
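# Wrap the model and tokenizer in an extractive question-answering pipeline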
qap = QuestionAnsweringPipeline(tokenizer=tokenizer, model=model)
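
# `st` is expected to be the Streamlit module: the calling app keeps the
# document text to answer over in st.session_state.content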
def generate_response(st, prompt):
    # Generate an answer with Transformers: extract the span of the stored
    # content (truncated to its first 100 characters) that answers the prompt
    answer = qap(context=st.session_state.content[:100], question=prompt)
    return answer["answer"]
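
# --- Illustrative usage (a sketch, not part of the original Space code) ---
# Assumes this file is run as a Streamlit app and that the calling code keeps
# the source text under the session-state key "content"; the sample sentence
# and the chat-input wiring below are illustrative assumptions.
if __name__ == "__main__":
    import streamlit as st

    # Seed the session with some Japanese text to ask questions about
    if "content" not in st.session_state:
        st.session_state.content = "国境の長いトンネルを抜けると雪国であった。"

    # Read a question from the chat box and display the extracted answer span
    if prompt := st.chat_input("Enter a question about the text"):
        st.write(generate_response(st, prompt))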