
from django.http import JsonResponse,StreamingHttpResponse
import json
import random
from dashscope import Generation
from http import HTTPStatus
from django.core import serializers 
from robot import models
from django.forms.models import model_to_dict
from django.views.decorators.http import require_GET,require_POST,require_http_methods
from django.views.decorators.csrf import csrf_exempt

import os
from mychat import settings
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain.embeddings.dashscope import DashScopeEmbeddings
from langchain_community.llms.tongyi import Tongyi
# 检索问答
from langchain.chains import RetrievalQA

from langchain.chains.llm import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate


from dashscope import Generation




# Module-level chat history shared by newchat() and sse_notifications().
# NOTE(review): this list is shared across ALL users/requests in the process,
# so conversations from different clients are mixed — confirm this is intended.
messages=[]


@csrf_exempt
@require_POST
def newchat(request):
    """Single-turn chat endpoint backed by the qwen-max model.

    Expects a JSON POST body: {"prompt": "..."} (optional top_p/top_k are
    currently ignored). Appends the exchange to the module-level `messages`
    history and returns the assistant reply as JSON.
    """
    try:
        data = json.loads(request.body)
    except json.JSONDecodeError:
        return JsonResponse({'error': 'Invalid JSON'}, status=400)

    prompt = data.get('prompt')
    if not prompt:
        # Robustness fix: a missing key used to raise KeyError -> HTTP 500.
        return JsonResponse({'error': 'Missing prompt'}, status=400)

    # NOTE(review): `messages` is module-level state shared by every client —
    # confirm cross-user history sharing is intended.
    messages.append({"role": "user", "content": prompt})

    response = Generation.call(model="qwen-max",
                               messages=messages,
                               result_format='message')

    if response.status_code == HTTPStatus.OK:
        content = response.output.choices[0]['message']['content']
        messages.append({'role': 'assistant', 'content': content})
        return JsonResponse({"code": 200, 'message': content})

    # Bug fix: the original error branch only printed and fell through,
    # implicitly returning None, which makes Django raise
    # "The view didn't return an HttpResponse". Return an explicit error.
    print('Request id: %s, Status code: %s, error code: %s, error message: %s' % (
        response.request_id, response.status_code,
        response.code, response.message
    ))
    return JsonResponse(
        {"code": response.status_code, 'message': response.message},
        status=502,
    )


# def generate_sse(responses):
#     for response in responses:
#         if response.status_code == HTTPStatus.OK:
#             data1 = response.output.choices[0]['message']['content']
#             data = f"data: {data1}\n\n"
#             if data1:
#                 yield data.encode('utf-8')  # 必须编码为字节串
#             else:
#                 return "no mes"

def sse_notifications(request):
    """Stream an incremental model reply as Server-Sent Events.

    Reads the user prompt from the `prompt` query parameter, appends it to
    the shared `messages` history, and streams qwen-1.8b-chat output chunks
    as a text/event-stream response via generate_sse().
    """
    prompt = request.GET.get('prompt')
    if not prompt:
        # Robustness fix: without a prompt the model call fails downstream.
        return JsonResponse({'error': 'Missing prompt'}, status=400)

    # NOTE(review): reuses the module-level `messages` list shared with
    # newchat() and every other client — confirm that is intended.
    messages.append({'role': 'user', 'content': prompt})

    # Leftover debug print removed.
    responses = Generation.call(
        model="qwen-1.8b-chat",
        messages=messages,
        result_format='message',
        stream=True,              # streamed output
        incremental_output=True,  # each chunk contains only the new text
    )

    response = StreamingHttpResponse(
        generate_sse(responses),
        content_type="text/event-stream",
    )
    response["Cache-Control"] = "no-cache"
    return response

























@csrf_exempt
@require_http_methods(["POST"])
def uploadFile(request):
    """Receive an uploaded text file, save it under MEDIA_ROOT and index it.

    The saved file is split, embedded and added to the Chroma vector store
    via add_chroma(). Returns {"code": 200} on success.
    """
    file = request.FILES.get('file')
    if file is None:
        # Robustness fix: a missing upload used to raise KeyError -> HTTP 500.
        return JsonResponse({'error': 'Missing file'}, status=400)

    # Security fix: keep only the base name of the client-supplied filename so
    # a crafted name such as "../../x" cannot escape MEDIA_ROOT.
    file_path = os.path.join(settings.MEDIA_ROOT, os.path.basename(file.name))
    with open(file_path, 'wb+') as destination:
        for chunk in file.chunks():
            destination.write(chunk)

    # Index the document in the vector store.
    add_chroma(file_path)
    return JsonResponse({"code": 200})

# Lazily-initialized, module-level Chroma vector-store handle; created on
# first use by add_chroma() / chat() and shared across requests.
db = None
def add_chroma(file_path):
    """Load a UTF-8 text file, split it into chunks and add them to the
    module-level Chroma vector store, creating the store on first use.

    Args:
        file_path: path of the text document to index.
    """
    global db

    # Load the document from disk.
    loader = TextLoader(file_path, encoding="utf-8")
    docs = loader.load()

    # Split into ~200-character chunks on newlines with no overlap.
    splitter = CharacterTextSplitter("\n", chunk_size=200, chunk_overlap=0)
    chunks = splitter.split_documents(docs)

    embedding = DashScopeEmbeddings()

    # Idiom fix: `is None` instead of `== None` for the sentinel check.
    # Create the store on first use, otherwise append to it.
    if db is None:
        db_path = os.path.join(settings.DB_ROOT, "chroma")
        db = Chroma.from_documents(chunks, embedding, persist_directory=db_path)
    else:
        db.add_documents(chunks)

    # Persist the store to disk.
    db.persist()
    

    
@csrf_exempt
@require_http_methods(["POST"])
def chat(request):
    """RAG chat endpoint: answer a question from the Chroma vector store.

    Expects a JSON POST body {"question": "..."}. Opens the persisted store
    on first use, then runs a "stuff" RetrievalQA chain over it with Tongyi.
    """
    global db

    try:
        data = json.loads(request.body)
    except json.JSONDecodeError:
        return JsonResponse({'error': 'Invalid JSON'}, status=400)

    question = data.get('question')
    if not question:
        # Robustness fix: invoking the chain with None would crash downstream.
        return JsonResponse({'error': 'Missing question'}, status=400)

    llm = Tongyi()

    # Idiom fix: `is None` instead of `== None`. Reopen the persisted store
    # lazily if this process has not loaded it yet.
    if db is None:
        db_path = os.path.join(settings.DB_ROOT, "chroma")
        db = Chroma(persist_directory=db_path, embedding_function=DashScopeEmbeddings())

    qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=db.as_retriever())

    ret = qa.invoke(question)

    return JsonResponse({'status': 'ok', 'data': ret})


# Prompt template for the memory-backed chat chain; {chat_history} is filled
# in by ConversationBufferMemory and {human_input} by the caller's question.
# (The template text itself is a user-facing runtime string and is left as-is.)
template = """你是一个机器人助理。
{chat_history}
user:{human_input}
AI:"""

prompt= PromptTemplate(
    template=template,
    input_variables=["chat_history", "human_input"],
)

# Module-level conversation buffer: one history shared by the whole process.
memory = ConversationBufferMemory(
    memory_key="chat_history",
)

# LLMChain wired to qwen-1.8b-chat; used by chat1(). Instantiated at import
# time, so importing this module requires DashScope credentials to be set.
chain = LLMChain(
    llm=Tongyi(model_name="qwen-1.8b-chat"),
    memory=memory,
    prompt=prompt,
    verbose=True,
)

@csrf_exempt
@require_http_methods(["POST"])
def chat1(request):
    """Memory-backed chat endpoint using the module-level LLMChain.

    Expects a JSON POST body {"question": "..."}; the conversation history
    lives in the chain's shared ConversationBufferMemory.
    """
    try:
        data = json.loads(request.body)
    except json.JSONDecodeError:
        return JsonResponse({'error': 'Invalid JSON'}, status=400)

    question = data.get('question')
    if not question:
        # Robustness fix: invoking the chain with None would crash downstream.
        return JsonResponse({'error': 'Missing question'}, status=400)

    # ~30 lines of commented-out session/Generation code removed; see VCS
    # history if it needs to be restored.
    ret = chain.invoke(question)

    return JsonResponse({'status': 'ok', 'data': ret})
    

# Message history for the SSE streaming endpoint ssechat(); like `messages`,
# it is shared by all clients of this process.
ssemessages=[]
 
def generate_sse(responses):
    """Yield streamed model chunks as SSE-framed byte strings.

    Iterates over a streaming Generation.call() result; each OK chunk is
    emitted as b"data: <text>\\n\\n". An OK chunk with empty content ends the
    stream; non-OK chunks are skipped.
    """
    for response in responses:
        if response.status_code == HTTPStatus.OK:
            content = response.output.choices[0]['message']['content']
            if content:
                # SSE frames must be byte strings.
                yield f"data: {content}\n\n".encode('utf-8')
            else:
                # Bug fix: the original `return "no message"` value inside a
                # generator is silently discarded (it only becomes the
                # StopIteration value); a bare return makes the intent clear.
                return

def ssechat(request):
    """Stream a chat reply as Server-Sent Events.

    Reads the question from the `question` query parameter, appends it to
    the ssemessages history and streams incremental qwen-1.8b-chat output
    as a text/event-stream response via generate_sse().
    """
    question = request.GET.get('question')
    if not question:
        # Robustness fix: without a question the model call fails downstream.
        return JsonResponse({'error': 'Missing question'}, status=400)

    ssemessages.append({"role": "user", "content": question})

    # Leftover debug print removed.
    responses = Generation.call(
        model="qwen-1.8b-chat",
        messages=ssemessages,
        result_format='message',
        stream=True,              # streamed output
        incremental_output=True,  # each chunk contains only the new text
    )

    response = StreamingHttpResponse(
        generate_sse(responses),
        content_type="text/event-stream",
    )
    response["Cache-Control"] = "no-cache"
    return response
@csrf_exempt
def addsession(request):
    """Create a History record from the JSON request body.

    NOTE(review): the body's keys are passed straight to
    History.objects.create(**data) — mass assignment of client-controlled
    fields; confirm the model has no sensitive columns.
    """
    try:
        data = json.loads(request.body)
    except json.JSONDecodeError:
        return JsonResponse({'error': 'Invalid JSON'}, status=400)

    try:
        models.History.objects.create(**data)
    except TypeError:
        # Robustness fix: unknown keys used to raise TypeError -> HTTP 500;
        # report a client error instead.
        return JsonResponse({'error': 'Invalid fields'}, status=400)
    return JsonResponse({'status': 'ok'})
@csrf_exempt
def sessionList(request):
    """Return all History records, serialized by Django's JSON serializer.

    NOTE(review): serializers.serialize() returns a JSON *string*, so this
    response body is a double-encoded JSON string, not an array — clients
    must JSON-parse twice. If that is unintended, json.loads() the string
    before wrapping it in JsonResponse. Behavior kept as-is here because
    existing clients may depend on it.
    """
    # Idiom fix: do not shadow the builtin `list`.
    histories = models.History.objects.all()
    return JsonResponse(serializers.serialize("json", histories), safe=False)

@csrf_exempt
def delsession(request):
    """Delete the History records whose name matches the POSTed `id` value."""
    # NOTE(review): the POST parameter is called 'id' but is matched against
    # the `name` column — confirm this naming mismatch is intentional.
    session_name = request.POST.get('id')
    models.History.objects.filter(name=session_name).delete()
    return JsonResponse({'status': 'ok'})

@csrf_exempt
def getsession(request):
    """Return one History record, looked up by the `name` query param, as JSON."""
    name = request.GET.get('name')
    try:
        obj = models.History.objects.get(name=name)
    except models.History.DoesNotExist:
        # Robustness fix: a missing record used to raise DoesNotExist -> 500.
        return JsonResponse({'error': 'Not found'}, status=404)
    return JsonResponse(model_to_dict(obj), safe=False)



    