'''
Author: zhanwei xu
Date: 2023-11-19 16:53:10
LastEditors: zhanwei xu
LastEditTime: 2024-02-02 11:09:47
Description: 

Copyright (c) 2023 by zhanwei xu, Tsinghua University, All Rights Reserved. 
'''

from poe_api_wrapper import PoeApi
import textwrap
import json
import os
import numpy as np
import pandas as pd

import functools
import asyncio
from concurrent.futures import ThreadPoolExecutor
import google.generativeai as genai
import requests
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain.document_loaders import TextLoader, PyPDFLoader
from langchain.document_loaders import Docx2txtLoader
from langchain.document_loaders import UnstructuredHTMLLoader
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.document_loaders import UnstructuredFileLoader
from langchain.document_loaders import UnstructuredPowerPointLoader
from langchain.document_loaders import UnstructuredExcelLoader
from langchain.document_loaders import UnstructuredMarkdownLoader




async def get_file_content(file_path):
    """Load a document from disk and return its full text content.

    Picks a langchain loader based on the file extension, splits the
    document into 1000-character chunks (no overlap), and concatenates
    every chunk's text.

    NOTE(review): declared ``async`` but contains no ``await`` — all the
    loading work runs synchronously and will block the event loop for
    large files.

    Args:
        file_path: Path to the document on disk.

    Returns:
        The document text as a single string, or ``None`` when the file
        does not exist (kept for backward compatibility with callers
        that check for a falsy result).
    """
    chunk_size = 1000
    chunk_overlap = 0
    if not os.path.exists(file_path):
        return None
    # genai.configure(api_key=client_token)
    if file_path.endswith(".pdf"):
        loader = PyPDFLoader(file_path)
    elif file_path.endswith((".docx", ".doc")):
        loader = Docx2txtLoader(file_path)
    elif file_path.endswith(".pptx"):
        loader = UnstructuredPowerPointLoader(file_path)
    elif file_path.endswith((".xlsx", ".xls")):
        loader = UnstructuredExcelLoader(file_path, mode="elements")
    elif file_path.endswith(".csv"):
        loader = CSVLoader(file_path)
    elif file_path.endswith(".md"):
        loader = UnstructuredMarkdownLoader(file_path)
    else:
        # Fall back to plain-text loading for unknown extensions.
        loader = TextLoader(file_path)
    splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    chunks = loader.load_and_split(splitter)
    # Join once at C speed instead of quadratic ``+=`` concatenation.
    return "".join(chunk.page_content for chunk in chunks)
async def async_request_4(question, content, message, user_key):
    """Send a question-about-article prompt to the zaiwenai message_4 endpoint.

    Builds a prompt embedding ``content`` and ``question``, appends it to
    ``message`` (NOTE: this mutates the caller's list in place), and POSTs
    the conversation as a streamed request. The blocking ``requests`` call
    is pushed onto a thread-pool executor so the event loop stays free.

    Args:
        question: The user's question about the article.
        content: The article text to answer from.
        message: Conversation history; a new user turn is appended in place.
        user_key: API key forwarded in the payload.

    Returns:
        The streamed ``requests.Response``; on any request failure a
        synthetic ``Response`` with ``status_code == 500`` is returned
        instead of raising.
    """
    url = "https://www.zaiwenai.top/message_4"
    prompt = f'''
    "Article:
    {content}

    Question:
    {question}
    
    Based on the above Article, answer the Question above using the language of the Question.
    "
    '''
    message.append({"content":prompt,"role":"user"})
    payload = {
        "message": message,
        "mode": "beaver_1106",
        "key":user_key
    }
    headers = {
        'User-Agent': 'Apifox/1.0.0 (https://apifox.com)',
        'Content-Type': 'application/json'
    }
    loop = asyncio.get_running_loop()
    request = functools.partial(requests.post, url, headers=headers, json=payload, stream = True, timeout = 20)
    try:
        with ThreadPoolExecutor() as executor:
            response = await loop.run_in_executor(
                executor, request
            )
    except Exception:
        # Best-effort fallback: callers only inspect status_code, so hand
        # back a synthetic 500 instead of propagating the network error.
        # (Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate.)
        response = requests.Response()
        response.status_code = 500
    return response
def chattext_chatgpt(response):
    """Yield a streamed response's body in 1 KiB chunks.

    Thin generator wrapper over ``response.iter_content`` so callers can
    consume the stream lazily.
    """
    yield from response.iter_content(chunk_size=1024)


def generate_text_poe(client_token,filename, question):
    """Stream a Poe answer about an uploaded file.

    Deletes every existing chat thread on the account (Poe limits how many
    threads an account may hold), then sends ``question`` together with the
    file attachment and yields response chunks as they arrive.

    Args:
        client_token: Poe client token used to construct ``PoeApi``.
        filename: Path of the file to attach; must still exist on disk.
        question: The user's question about the file.

    Yields:
        Incremental response text chunks, or a fixed notice (in Chinese)
        asking the user to re-upload when the file has been cleaned up.
    """
    if not os.path.exists(filename):
        yield '文件已被清理，请重新上传文件'
        return
    mode = 'a2_100k'
    client = PoeApi(client_token)
    data = client.get_chat_history()['data']
    # Flatten history into (bot_key, chat_id) pairs for deletion.
    result = [(key, item['chatId']) for key, value in data.items() for item in value]
    for chatId in result:
        try:
            client.delete_chat(chatId[0], chatId[1])
        except Exception:
            # Best-effort cleanup: a failed delete should not block the
            # new conversation. (Narrowed from a bare ``except:``.)
            pass
    # Create new chat thread with the file attached and stream the answer.
    print(filename)  # NOTE(review): debug trace left in place intentionally
    for chunk in client.send_message(mode, question, file_path=[filename]):
        yield chunk["response"]
def generate_text(client_token, question):
    """Stream a Poe answer for a plain-text question (no attachment).

    Mirrors ``generate_text_poe``: clears all existing chat threads first
    (Poe limits how many an account may hold), then streams the reply.

    Args:
        client_token: Poe client token used to construct ``PoeApi``.
        question: The user's question.

    Yields:
        Incremental response text chunks.
    """
    mode = 'a2_100k'
    client = PoeApi(client_token)
    data = client.get_chat_history()['data']
    # Flatten history into (bot_key, chat_id) pairs for deletion.
    result = [(key, item['chatId']) for key, value in data.items() for item in value]
    for chatId in result:
        try:
            client.delete_chat(chatId[0], chatId[1])
        except Exception:
            # Best-effort cleanup; failed deletes must not block the new
            # conversation. (Narrowed from a bare ``except:``.)
            pass
    # Create new chat thread and stream the answer.
    for chunk in client.send_message(mode, question):
        yield chunk["response"]


def get_single_asset(asset_id, timeout=30):
    """Fetch a single asset record from the chatcns user service.

    NOTE(review): sends a JSON body on a GET request — unusual, but kept
    because the remote endpoint apparently expects it; confirm with the
    service owner before changing to POST.

    Args:
        asset_id: Identifier of the asset to fetch.
        timeout: Seconds to wait before giving up (added so a stuck
            server cannot hang the caller forever; default keeps the
            call backward-compatible).

    Returns:
        The decoded JSON response body.
    """
    url = "https://user.chatcns.com/asset/get"
    body = {"id": asset_id}
    response = requests.get(url, json=body, timeout=timeout)
    return response.json()


def get_multiple_assets(asset_keys, timeout=30):
    """Fetch multiple asset records from the chatcns user service.

    Args:
        asset_keys: Iterable/list of asset keys to look up.
        timeout: Seconds to wait before giving up (added so a stuck
            server cannot hang the caller forever; default keeps the
            call backward-compatible).

    Returns:
        The decoded JSON response body.
    """
    url = "https://user.chatcns.com/asset/list"
    body = {"keys": asset_keys}
    response = requests.post(url, json=body, timeout=timeout)
    return response.json()


def generate_text_gemini(client_token, question,df):
    """Stream a Gemini answer grounded in the document chunks held in ``df``.

    Builds a Chinese customer-service prompt from the document text plus the
    user's question and streams the gemini-pro response.

    Args:
        client_token: Google Generative AI API key.
        question: The user's question.
        df: DataFrame with a ``Text`` column (and ``Embeddings`` if the
            retrieval path is re-enabled) as produced by
            ``get_chunk_from_document``.

    Yields:
        Incremental response text chunks.
    """
    genai.configure(api_key=client_token)
    # Distinct name so it is never shadowed by the GenerativeModel below
    # (the original rebound ``model``, which would break find_best_passage
    # if it were called after the rebinding).
    embedding_model = 'models/embedding-001'

    def find_best_passage(query, dataframe):
        """Return the Text whose embedding has max dot product with the query.

        Currently unused (see the commented call below) — retained as the
        retrieval alternative to stuffing the whole document in the prompt.
        """
        query_embedding = genai.embed_content(model=embedding_model,
                                              content=query,
                                              task_type="retrieval_query")
        dot_products = np.dot(np.stack(dataframe['Embeddings']), query_embedding["embedding"])
        idx = np.argmax(dot_products)
        return dataframe.iloc[idx]['Text']  # Return text from index with max value

    def get_all_passage(query, dataframe):
        """Concatenate every Text row; ``query`` is accepted but unused,
        keeping the same call signature as find_best_passage."""
        return "".join(dataframe['Text'])

    def make_prompt(query, relevant_passage):
        """Embed the passage and question into the customer-service prompt,
        stripping quotes/newlines so the passage cannot break the template."""
        escaped = relevant_passage.replace("'", "").replace('"', "").replace("\n", " ")
        prompt = textwrap.dedent("""你是在问网站的客服代表，专门回答用户的问题。你的回答将基于在问网站提供的文档内容。\
        请确保以完整的句子回答，内容全面，包含所有相关的背景信息。\
        同时，由于你的受众可能不具备专业背景，因此请用简单易懂的语言解释复杂概念，保持友好和易于交流的语气。\
        如果提供的文档与问题无关，你可以忽略它。
        问题: '{query}'
        文档: '{relevant_passage}'
        回答:
        """).format(query=query, relevant_passage=escaped)

        return prompt

    # passage = find_best_passage(question, df)
    passage = get_all_passage(question, df)
    prompt = make_prompt(question, passage)
    model = genai.GenerativeModel('gemini-pro')
    response = model.generate_content(prompt,stream=True)
    for chunk in response:
        yield chunk.text


def get_chunk_from_document(client_token,file_path, chunk_size=1000, chunk_overlap=20):
    """Split a document into chunks and embed each chunk with Gemini.

    Picks a langchain loader based on the file extension, splits the
    document, and returns a DataFrame with the raw text of each chunk and
    its retrieval embedding.

    Args:
        client_token: Google Generative AI API key used for embedding.
        file_path: Path to the document on disk.
        chunk_size: Maximum characters per chunk.
        chunk_overlap: Characters of overlap between adjacent chunks.

    Returns:
        A DataFrame with columns ``Text`` and ``Embeddings``, or ``None``
        when the file does not exist (kept for backward compatibility).
    """
    if not os.path.exists(file_path):
        return None
    # genai.configure(api_key=client_token)
    if file_path.endswith(".pdf"):
        loader = PyPDFLoader(file_path)
    elif file_path.endswith((".docx", ".doc")):
        loader = Docx2txtLoader(file_path)
    elif file_path.endswith(".pptx"):
        loader = UnstructuredPowerPointLoader(file_path)
    elif file_path.endswith((".xlsx", ".xls")):
        loader = UnstructuredExcelLoader(file_path, mode="elements")
    elif file_path.endswith(".csv"):
        loader = CSVLoader(file_path)
    elif file_path.endswith(".md"):
        loader = UnstructuredMarkdownLoader(file_path)
    else:
        # Fall back to plain-text loading for unknown extensions.
        loader = TextLoader(file_path)
    splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    chunks = loader.load_and_split(splitter)
    # Build the frame in one shot instead of per-row ``df.loc[len(df)]``
    # appends, which reallocate on every insertion.
    df = pd.DataFrame({'Text': [chunk.page_content for chunk in chunks]})
    model = 'models/embedding-001'
    genai.configure(api_key=client_token)

    def embed_fn(text):
        """Return the retrieval-document embedding vector for one chunk."""
        return genai.embed_content(model=model,
                                   content=text,
                                   task_type="retrieval_document")["embedding"]
    df['Embeddings'] = df['Text'].apply(embed_fn)
    return df

