"""
此程序为使用本地部署的方式构建DeepSeek智能应用的后端程序
"""

from langchain_community.llms.ollama import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
import requests
import PyPDF2  # 用于提取PDF文件中的文本

# Path to the PDF knowledge-base file.
PDF_FILE_PATH = "pdf" # TODO: replace with the path of the knowledge-base file you want to upload


# Call the locally deployed DeepSeek model through the Ollama generate endpoint.
def call_deepseek(
        prompt = 'hello!',                  # prompt text sent to the model
        conversationList = None,            # chat history: list of {"role", "content"} dicts
        Contextual_relevance_on = False,    # whether to prepend the history as context
        timeout = None                      # seconds passed to requests.post; None = block forever (original behavior)
        ):
    """Send *prompt* to the locally deployed DeepSeek model via Ollama.

    Parameters:
        prompt: The prompt string to send.
        conversationList: Chat history as ``{"role": ..., "content": ...}``
            dicts.  Only used when ``Contextual_relevance_on`` is True.
            Defaults to ``None`` (replaced per call with a single placeholder
            entry) to avoid the shared mutable-default-argument pitfall.
        Contextual_relevance_on: If True, the history is flattened into the
            prompt so the model sees the conversation context.
        timeout: Optional request timeout in seconds for ``requests.post``;
            the default ``None`` preserves the original (unbounded) behavior.

    Returns:
        The streaming ``requests.Response`` object (``stream=True``); the
        caller is responsible for iterating and closing it.
    """
    # Bind the default history per call instead of using a mutable default.
    if conversationList is None:
        conversationList = [{"role": "user", "content": "hello"}]
    url = "http://127.0.0.1:11434/api/generate"  # local Ollama endpoint
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }
    if Contextual_relevance_on:
        # Flatten the history into "role:content" lines so the model can use it.
        History = '\n'.join([entry['role'] + ':' + entry['content'] for entry in conversationList])
        combine_prompt = (
            f"{History}\n"
            f"你是：assistant，我是：user\n"
            f"{prompt}"
        )
        data = {
            "model": "deepseek-r1:8b",
            "prompt": combine_prompt,
            "stream": True,
            "options": {                # optional generation parameters
                "temperature": 0.2,
                "max_tokens": 100
            }
        }
    else:
        data = {
            "model": "deepseek-r1:8b",  # replace with your own locally deployed model name
            "prompt": prompt,
            "stream": True
        }
    # POST to the local DeepSeek API; stream=True so the answer arrives in chunks.
    response = requests.post(url, json=data, headers=headers, stream=True, timeout=timeout)
    response.encoding = 'utf-8'
    return response
