{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# deepseek原生api"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Hello! How can I assist you today? 😊\n"
     ]
    }
   ],
   "source": [
    "# Please install OpenAI SDK first: `pip3 install openai`\n",
    "\n",
    "import os\n",
    "\n",
    "from openai import OpenAI\n",
    "\n",
    "# SECURITY FIX: never hardcode an API key in a notebook; read it from the\n",
    "# environment instead (set DEEPSEEK_API_KEY before launching the kernel).\n",
    "client = OpenAI(api_key=os.environ[\"DEEPSEEK_API_KEY\"],\n",
    "                base_url=\"https://api.deepseek.com\")\n",
    "\n",
    "# Minimal non-streaming chat completion against the DeepSeek endpoint.\n",
    "response = client.chat.completions.create(\n",
    "    model=\"deepseek-chat\",\n",
    "    messages=[\n",
    "        {\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n",
    "        {\"role\": \"user\", \"content\": \"Hello\"},\n",
    "    ],\n",
    "    stream=False,\n",
    "    max_tokens=50\n",
    ")\n",
    "\n",
    "print(response.choices[0].message.content)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# deepseek huggingface 模型加载"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "# Local deployment test of a Hugging Face causal LM.\n",
    "\n",
    "import os\n",
    "\n",
    "from transformers import AutoTokenizer, AutoModelForCausalLM\n",
    "\n",
    "# FIX: avoid a hardcoded absolute local path -- allow overriding via an\n",
    "# environment variable while keeping the original location as the default.\n",
    "model_name = os.environ.get(\n",
    "    \"DEEPSEEK_MODEL_DIR\",\n",
    "    \"/e/Resources/LLM/huggingface/DeepSeek-R1-Distill-Qwen-1.5B\",\n",
    ")\n",
    "# Load the tokenizer and the model.\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
    "\n",
    "model = AutoModelForCausalLM.from_pretrained(model_name)\n",
    "# model = AutoModelForCausalLM.from_pretrained(model_name).to(\"cuda\")\n",
    "\n",
    "print(\"-----模型加载成功-----\")\n",
    "# Put the model in evaluation mode (disables dropout etc.).\n",
    "model.eval()\n",
    "\n",
    "# Define the Q&A helper.\n",
    "def generate_answer(question, model, tokenizer, max_length=100):\n",
    "    \"\"\"Generate an answer for `question` with a causal LM.\n",
    "\n",
    "    Args:\n",
    "        question: the prompt string.\n",
    "        model: a loaded AutoModelForCausalLM.\n",
    "        tokenizer: the tokenizer matching `model`.\n",
    "        max_length: cap on total length (prompt + generated tokens).\n",
    "\n",
    "    Returns:\n",
    "        The decoded text; note it includes the prompt as a prefix.\n",
    "    \"\"\"\n",
    "    # Encode the question into input tensors.\n",
    "    inputs = tokenizer(question, return_tensors=\"pt\")\n",
    "\n",
    "    # Generate the answer. FIX: pass attention_mask explicitly (avoids the\n",
    "    # mask-inference warning) and drop early_stopping, which only applies\n",
    "    # to beam search and was a warning-producing no-op under greedy decode.\n",
    "    with torch.no_grad():\n",
    "        outputs = model.generate(\n",
    "            inputs.input_ids,\n",
    "            attention_mask=inputs.attention_mask,\n",
    "            max_length=max_length,  # cap on generated length\n",
    "            num_return_sequences=1,  # only one answer\n",
    "            no_repeat_ngram_size=2  # avoid repeated phrases\n",
    "        )\n",
    "\n",
    "    # Decode the generated ids, dropping special tokens.\n",
    "    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
    "    return answer\n",
    "\n",
    "# Try the Q&A helper.\n",
    "question = \"什么是人工智能？\"\n",
    "\n",
    "# Generate an answer.\n",
    "answer = generate_answer(question, model, tokenizer)\n",
    "print(f\"Q: {question}\")\n",
    "print(f\"A: {answer}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# deepseek：chat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "from openai import OpenAI\n",
    "\n",
    "url = 'https://api.deepseek.com/v1/'\n",
    "# SECURITY FIX: read the key from the environment; never hardcode it.\n",
    "api_key = os.environ['DEEPSEEK_API_KEY']\n",
    "\n",
    "client = OpenAI(\n",
    "    base_url=url,\n",
    "    api_key=api_key\n",
    ")\n",
    "\n",
    "# Send a non-streaming request (round 1).\n",
    "content=\"你是ai\"\n",
    "messages = [\n",
    "    {\"role\": \"assistant\", \"content\": content},\n",
    "    {\"role\": \"user\", \"content\": \"你是谁？？\"}\n",
    "]\n",
    "response = client.chat.completions.create(\n",
    "    model=\"deepseek-chat\",\n",
    "    messages=messages,\n",
    "    stream=False,\n",
    "    max_tokens=4096\n",
    ")\n",
    "# Keep the model's reply so round 2 can use it as conversation history.\n",
    "answer = response.choices[0].message.content\n",
    "print(answer)\n",
    "# reasoning_content = response.choices[0].message.reasoning_content\n",
    "\n",
    "# Round 2\n",
    "print('')\n",
    "request='请总结一下'\n",
    "print(request)\n",
    "# BUG FIX: previously the original `content` prompt was appended here\n",
    "# instead of the model's actual round-1 reply, so round 2 received fake\n",
    "# history. Append the real reply (`answer`).\n",
    "messages.append({\"role\": \"assistant\", \"content\": answer})\n",
    "messages.append({'role': 'user', 'content': request})\n",
    "response = client.chat.completions.create(\n",
    "    model=\"deepseek-chat\",\n",
    "    messages=messages,\n",
    "    stream=False\n",
    ")\n",
    "\n",
    "print(response.choices[0].message.content)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# deepseek : reasoner stream"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "from openai import OpenAI\n",
    "\n",
    "url = 'https://api.deepseek.com'\n",
    "# SECURITY FIX: API keys (including the previously commented-out one) must\n",
    "# not live in the file; read from the environment instead.\n",
    "api_key = os.environ['DEEPSEEK_API_KEY']\n",
    "\n",
    "client = OpenAI(\n",
    "    base_url=url,\n",
    "    api_key=api_key\n",
    ")\n",
    "\n",
    "# Send a streaming request (round 1).\n",
    "messages = [\n",
    "    {\"role\": \"system\", \"content\": \"你是一个ai助理\"},\n",
    "    {\"role\": \"user\", \"content\": \"你好！请介绍下你自己。\"},\n",
    "]\n",
    "response = client.chat.completions.create(\n",
    "    model=\"deepseek-reasoner\",\n",
    "    messages=messages,\n",
    "    stream=True,  # enable streaming output\n",
    "    max_tokens=1024\n",
    ")\n",
    "\n",
    "# BUG FIX: `content` previously leaked in from another cell, so this cell\n",
    "# raised NameError under Restart & Run All; initialize the accumulator\n",
    "# here before collecting the streamed deltas.\n",
    "content = ''\n",
    "for chunk in response:\n",
    "    if len(chunk.choices) > 0 and chunk.choices[0].delta.content:\n",
    "        content += chunk.choices[0].delta.content\n",
    "        print(chunk.choices[0].delta.content, end='')\n",
    "\n",
    "print()\n",
    "\n",
    "# Round 2: feed the accumulated round-1 answer back as chat history.\n",
    "messages.append({\"role\": \"assistant\", \"content\": content})\n",
    "\n",
    "request = \"你有什么优点和缺点？\"\n",
    "messages.append({\"role\": \"user\", \"content\": request})\n",
    "\n",
    "print(request)\n",
    "\n",
    "response = client.chat.completions.create(\n",
    "    model=\"deepseek-reasoner\",\n",
    "    messages=messages,\n",
    "    stream=True,\n",
    "    max_tokens=1024\n",
    ")\n",
    "\n",
    "for chunk in response:\n",
    "    if len(chunk.choices) > 0 and chunk.choices[0].delta.content:\n",
    "        print(chunk.choices[0].delta.content, end='')\n",
    "\n",
    "print()"
   ]
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "rag",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
