from django.shortcuts import render, redirect
from django.views import View

from user.models import UserInfo
from transformers import AutoTokenizer, AutoModel
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate

# Create chat views here.

# Chat history passed to model.chat() as (question, answer) pairs.
# NOTE(review): these are module-level globals shared by EVERY request/session,
# so model history is shared across all logged-in users — confirm this is
# intended (single-user deployment?).
history = []
# Lazily-initialized LangChain/ChatGLM components (populated by load_chat_model).
model = None        # ChatGLM model instance, or None until loaded
tokenizer = None    # matching tokenizer, or None until loaded
vector_store = None  # FAISS store; loading is currently commented out in load_chat_model


def load_chat_model(request):
    """Load the ChatGLM model and tokenizer into the module-level globals.

    Idempotent: if the model is already loaded, redirects immediately without
    reloading. Always responds with a redirect to 'chat:repeat_chat'.

    :param request: the incoming HttpRequest (unused beyond triggering the load)
    :return: HttpResponseRedirect to 'chat:repeat_chat'
    """
    global model
    global tokenizer
    global vector_store

    # Already initialized — nothing to do.
    if model:
        return redirect('chat:repeat_chat')

    # Load tokenizer and model from the SAME checkpoint. The original code
    # mixed "THUDM/chatglm2-6b-int4" (tokenizer) with "THUDM/chatglm2-6b"
    # (model); use one consistent checkpoint to avoid vocab/model mismatch.
    tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
    model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
    model = model.eval()
    # Alternative checkpoint, kept for reference:
    # tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True)
    # model = AutoModel.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True).half().cuda()
    # model = model.eval()

    # Optional retrieval setup (disabled), kept for reference:
    # embeddings = HuggingFaceBgeEmbeddings(
    #     model_name="BAAI/bge-large-zh-v1.5",  # embedding model
    #     model_kwargs={'device': 'cuda'},
    #     encode_kwargs={'normalize_embeddings': True},  # True -> cosine similarity
    # )
    # vector_store = FAISS.load_local(
    #     os.path.join(settings.STATICFILES_DIRS[0], 'vector_store'), embeddings)

    # The previous try/except around the commented-out vector-store load
    # returned the same redirect on both paths while silently swallowing
    # every exception — removed as dead code.
    return redirect('chat:repeat_chat')


class QuestionsAnswers(View):
    """Core Q&A logic (智能问答核心逻辑).

    GET renders the chat page with the session's message history; POST sends
    the user's prompt to the loaded ChatGLM model and appends the reply.

    NOTE(review): ``history``/``model``/``tokenizer`` are module-level globals,
    so the model-side conversation history is shared across ALL sessions —
    confirm this is acceptable for the deployment.
    """

    def _ensure_messages(self, request):
        """Seed the session's message list with the system prompt if missing."""
        if 'messages' not in request.session:
            request.session['messages'] = [
                {"role": "system", "content": "你现在正在与用户聊天，为他们提供全面、简短、简洁的答案。"},
            ]

    def get(self, request):
        """Render the chat page, initializing the session message list."""
        global history
        global model
        global tokenizer
        global vector_store
        # The model must be loaded before chatting; bounce to the init view.
        if model is None:
            return redirect('chat:chat_init')
        # Look up the currently logged-in user.
        # NOTE(review): raises UserInfo.DoesNotExist if 'user_name' is absent
        # or stale — presumably a login check upstream guarantees it; confirm.
        user_name = request.session.get('user_name')
        user = UserInfo.objects.get(name=user_name)
        self._ensure_messages(request)

        context = {
            'messages': request.session['messages'],
            'prompt': '',
            'user': user,
        }
        return render(request, 'chat/question_answer.html', context)

    def post(self, request):
        """Run one chat turn: record the prompt, query the model, record the reply."""
        global history
        global model
        global tokenizer
        # The model must be loaded before chatting; bounce to the init view.
        if model is None:
            return redirect('chat:chat_init')
        # Look up the currently logged-in user.
        user_name = request.session.get('user_name')
        user = UserInfo.objects.get(name=user_name)
        # Fix: a POST arriving before any GET previously raised KeyError on
        # request.session['messages']; initialize it here as well.
        self._ensure_messages(request)
        # Prompt text from the submitted form.
        prompt = request.POST.get('prompt')
        # Fix: a missing/empty prompt was previously forwarded to model.chat
        # (sending None to the model); skip the model call in that case.
        if prompt:
            request.session['messages'].append({"role": "user", "content": prompt})
            request.session.modified = True
            # model.chat returns (reply_text, updated_history).
            response, history = model.chat(tokenizer, prompt, history=history)
            request.session['messages'].append({"role": "assistant", "content": response})
            request.session.modified = True

        context = {
            'messages': request.session['messages'],
            'prompt': '',
            'user': user,
        }
        return render(request, 'chat/question_answer.html', context)


def new_chat(request):
    """Reset the conversation.

    Clears the shared model-side history, and — when the model is loaded —
    also drops the session's message list before returning to the chat page.
    If the model is not yet initialized, redirects to the init view instead.
    """
    global history
    # The model-side history is always reset, even if the model isn't loaded.
    history = []
    model_ready = model is not None
    if model_ready:
        # Drop the per-session transcript; pop() tolerates a missing key.
        request.session.pop('messages', None)
    # Chatting requires an initialized model; otherwise go (re)initialize it.
    return redirect('chat:repeat_chat' if model_ready else 'chat:chat_init')
