from flask import jsonify, request, Response
from datetime import datetime, time
from models import Question, Choice, Bank, QuestionBank, Answer, Exercise, Knowledge
from app.extensions import db
import json
from config import STATIC_FOLDER
import io
from flask import send_file
from docx import Document
from . import chat_bp
import openai
import re
import traceback
import pandas as pd
from datetime import datetime
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain_community import llms
from langchain.memory import VectorStoreRetrieverMemory
from langchain.chains import ConversationChain
from langchain.prompts import PromptTemplate
import faiss
import uuid
from langchain_community.docstore import InMemoryDocstore
from langchain_community.vectorstores import FAISS
import os
from pathlib import Path
from openai import OpenAI

path_prefix = "chatHistory/"

@chat_bp.route('/')
def index():
    """Landing endpoint confirming the chat blueprint is mounted."""
    message = u"this is chat url prefix"
    return jsonify(message)


@chat_bp.route('/create', methods=["POST"])
def create():
    """Start a new conversation.

    Builds a fresh FAISS-backed retriever memory, answers the user's first
    message with the LLM, and persists the vector store under a newly
    generated conversation UUID so /chat can resume it later.

    Form fields:
        user_id:    currently ignored (overwritten below).
        user_input: the user's first message; required.

    Returns:
        200 with JSON {'answer': <llm reply>, 'uuid': <conversation id>},
        or 400 when user_input is missing.
    """
    user_id = request.form.get('user_id')
    # NOTE(review): user_id is immediately overwritten with a fixed value —
    # looks like a debug leftover; confirm before adding per-user behavior.
    user_id = 1
    user_input = request.form.get('user_input')
    if not user_input:
        # Fail fast instead of passing None into the LLM chain (500 before).
        return jsonify({'error': 'user_input is required'}), 400

    # Empty in-memory vector store sized for OpenAI embeddings (1536 dims).
    embedding_size = 1536
    user_index = faiss.IndexFlatL2(embedding_size)
    embedding_fn = OpenAIEmbeddings().embed_query
    vectorstore = FAISS(embedding_fn, user_index, InMemoryDocstore({}), {})
    # The memory surfaces the 5 most similar past exchanges as {history}.
    retriever = vectorstore.as_retriever(search_kwargs=dict(k=5))
    memory = VectorStoreRetrieverMemory(retriever=retriever)
    answer = chat_llm(memory, user_input)
    # Persist conversation state on disk keyed by a fresh UUID.
    conversation_uuid = str(uuid.uuid4())
    vectorstore.save_local(path_prefix + conversation_uuid)
    return jsonify({'answer': answer, 'uuid': conversation_uuid}), 200


@chat_bp.route('/chat', methods=["POST"])
def chat():
    """Continue an existing conversation identified by its UUID.

    Loads the persisted FAISS vector store for the conversation, answers the
    new message with the LLM, and saves the updated store back to disk.

    Form fields:
        user_id:    currently ignored (overwritten below).
        user_input: the user's message; required.
        uuid:       conversation id returned by /create; required.

    Returns:
        200 with JSON {'answer': <llm reply>},
        or 400 when user_input is missing or uuid is not a valid UUID.
    """
    user_id = request.form.get('user_id')
    # NOTE(review): debug leftover — user_id is unconditionally overwritten.
    user_id = 1
    user_input = request.form.get('user_input')
    conversation_uuid = request.form.get('uuid')
    if not user_input:
        return jsonify({'error': 'user_input is required'}), 400
    # Security fix: the uuid is client-controlled and is joined into a
    # filesystem path below. Round-trip through uuid.UUID so values like
    # "../../etc" are rejected instead of escaping path_prefix.
    try:
        conversation_uuid = str(uuid.UUID(conversation_uuid))
    except (TypeError, ValueError, AttributeError):
        return jsonify({'error': 'invalid uuid'}), 400

    embeddings = OpenAIEmbeddings()
    vectorstore = FAISS.load_local(path_prefix + conversation_uuid, embeddings)
    # Retrieve the 5 most similar past exchanges as conversational context.
    retriever = vectorstore.as_retriever(search_kwargs=dict(k=5))
    memory = VectorStoreRetrieverMemory(retriever=retriever)
    answer = chat_llm(memory, user_input)
    # Persist the store again so this turn is available to future calls.
    vectorstore.save_local(path_prefix + conversation_uuid)
    return jsonify({'answer': answer}), 200


@chat_bp.route('/text2speech', methods=["POST"])
def text2speech():
    """Convert text to speech via the OpenAI TTS API.

    Synthesizes the posted text into an mp3, writes it into the static
    folder under a random filename, and returns a URL where it can be
    fetched.

    Form fields:
        text: the text to synthesize; required.

    Returns:
        200 with JSON string of the mp3 URL, or 400 when text is missing.
    """
    text = request.form.get('text')
    if not text:
        return jsonify({'error': 'text is required'}), 400

    client = OpenAI()
    # Fix: the original assigned STATIC_FOLDER to __file__, shadowing the
    # module attribute; use a plain Path over the static folder instead.
    file_name = str(uuid.uuid4()) + '.mp3'
    speech_file_path = Path(STATIC_FOLDER) / file_name
    response = client.audio.speech.create(
        model="tts-1",
        voice="alloy",
        input=text
    )

    # Stream the synthesized audio into the static folder so the app can
    # serve it back under the /resource/ route.
    response.stream_to_file(speech_file_path)
    # NOTE(review): hard-coded dev host/port — consider deriving from request.
    url_prefix = 'http://127.0.0.1:5000/resource/'
    file_url = url_prefix + file_name
    return jsonify(file_url), 200


def chat_llm(memory, user_input):
    """Run one conversational turn through a LangChain ConversationChain.

    Args:
        memory: VectorStoreRetrieverMemory whose retrieved exchanges fill
            the {history} slot of the prompt.
        user_input: the user's message, filled into the {input} slot.

    Returns:
        The LLM's reply text.
    """
    _DEFAULT_TEMPLATE = """以下是人类和AI之间友好的对话。AI健谈并从其上下文中提供了许多具体细节。如果AI不知道问题的答案，它会真诚地说自己不知道。

    先前对话的相关部分：
    {history}

    （如果不相关，您无需使用这些信息）

    当前对话：
    人类：{input}
    AI:"""
    prompt = PromptTemplate(
        template=_DEFAULT_TEMPLATE,
        input_variables=["history", "input"],
    )
    # Any valid LLM would do; the default OpenAI completion model is used.
    chain = ConversationChain(
        llm=llms.OpenAI(),
        prompt=prompt,
        memory=memory,
        # verbose logging kept on, matching the original behavior.
        verbose=True,
    )
    return chain.predict(input=user_input)











