# import torch
# from Server import  chatglmpro, qianwen7b, qianwen, enire
#
# test2=[
# "你好，你是谁？",
# "我很烦恼，你能帮助我吗？",
# "我最近经常失眠，整晚整晚的睡不着。",
# "我感觉我的父母总是不理解我，他们总是指责我。",
# "我好想快点长大，成为一个独立自主的人"
#
# ]
#
#
# def answer(ask,history,choose,choose_model):
#     if len(history)>16:
#         history.pop(0)
#
#     if choose=="chatglm-pro":
#         return botmodel(chatglmpro,choose_model,ask,history)
#     elif choose=="文心一言":
#         return  botmodel(enire,choose_model,ask,history)
#     elif choose=="通义千问-7b":
#         return botmodel(qianwen7b,choose_model,ask,history)
#     elif choose=="通义千问":
#         return botmodel(qianwen,choose_model,ask,history)
#
#
# def botmodel(chatbot,choose_model,ask,history):
#     if choose_model == "原生":
#         return chatbot.answer_org(ask, history)
#     elif choose_model == "助手":
#         return chatbot.answer(ask, history)
#     elif choose_model == "AI咨询师":
#         return chatbot.answer_private(ask, history)
#
#
# models=[ "chatglm-pro"]
# mode=["原生"]
# for model in models:
#     for m in mode:
#         history=[]
#         for ask in test2:
#             allres = answer(ask,history, model, m)
#             for all_history in allres:
#                 # Streaming response: each yielded value is the updated history,
#                 # so keep overwriting until `history` holds the final state.
#                 history=all_history
#
#
#         print(model)
#         print(m)
#         print(history)
