{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Acknowledgement\n",
     "This ipynb file is only for draft coding. Well-coded scripts are in the Python file."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Environment and basic functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import json\n",
    "import datetime\n",
    "import re\n",
    "import xlsxwriter\n",
    "import xlrd\n",
    "\n",
    "# file must exist and not empty\n",
    "def exist(p):\n",
    "    if not os.path.exists(p):        \n",
    "        # if targeted file is txt, create this file for user's convenience\n",
    "        if p.split('.')[-1] == 'txt':            \n",
    "            print(f'{p} doesn\\'t exist. \\n A new {p} has been created, please paste required content in.')\n",
    "            f = open(p, mode='w', encoding='utf-8-sig')\n",
    "            f.write('')\n",
    "            f.close()\n",
    "            input('\\n* Any key to exist, please run the script again afterwards')\n",
    "        else:\n",
    "            input('\"{}\"文件不存在，请确认，任意键退出'.format(p))\n",
    "    if p.split('.')[-1] == 'txt':\n",
    "        f = open(p, mode='r')\n",
    "        if len(f.read())<1:\n",
    "            input(f'{p} is empty, please check')\n",
    "        f.close()\n",
    "    exit()\n",
    "\n",
    "# file_format_list = [‘.xls’, ‘.xlsx’]\n",
    "def file_name(file_dir, file_format_list):\n",
    "    \"\"\"Find goal file path\"\"\"\n",
    "    l = []\n",
    "    # root: current path, dirs\n",
    "    for root, dirs, files in os.walk(file_dir):\n",
    "        for file in files:\n",
    "            if os.path.splitext(file)[1] in file_format_list:\n",
    "                l.append(os.path.join(root, file))\n",
    "    return l\n",
    "\n",
    "def human_choose(choices, times):\n",
    "\t# choices: 供选择的list\n",
    "\t# times： 需要选择的个数\n",
    "    id = []\n",
    "    for i, j in enumerate(choices):\n",
    "        print(i, ':', j)\n",
    "        id.append(i)\n",
    "    print(f'本次需选择{times}个数据，请输入对应编号：\\n（严格按照目标字段顺序先后输入，英文逗号分隔，Enter确定）')\n",
    "    file = []\n",
    "    input_check(id, len(id), file, times)\n",
    "    return file\n",
    "\n",
     "def input_check(must_in, l, f, count):\n",
     "    \"\"\"Prompt for comma-separated numbers and append valid picks to f.\n",
     "\n",
     "    must_in: list of acceptable integer values.\n",
     "    l:       maximum allowed length of the raw input string.\n",
     "    f:       output list, mutated in place with the chosen ints.\n",
     "    count:   exact number of selections required.\n",
     "    \"\"\"\n",
     "    # The user gets three attempts; the fourth loop iteration aborts.\n",
     "    for i in range(4):\n",
     "        if i == 3:\n",
     "            input('请重新运行脚本，任意键退出')\n",
     "            exit()\n",
     "        r = input()\n",
     "        # Reject empty input or input longer than l characters.\n",
     "        if len(r) == 0:\n",
     "            print('输入值为空，剩余输入次数为:', (2 - i))\n",
     "            continue\n",
     "        elif len(r) > l:\n",
     "            print('输入值超出限制，剩余输入次数为:', (2 - i))\n",
     "            continue\n",
     "        else:\n",
     "            rr = r.split(',')\n",
     "            # g tracks whether the most recent token matched must_in.\n",
     "            # NOTE(review): despite the original 'avoid same number' intent,\n",
     "            # duplicate entries are not actually rejected here.\n",
     "            g = 'out'\n",
     "            rrr = []\n",
     "            for j in rr:\n",
     "                # Each token must equal one of the allowed values.\n",
     "                # NOTE(review): non-numeric tokens raise an uncaught ValueError.\n",
     "                for k in must_in:\n",
     "                    if int(j) == k:\n",
     "                        g = 'in'\n",
     "                        print(j)\n",
     "                        rrr.append(j)\n",
     "                        break\n",
     "                    else:\n",
     "                        g = 'out'\n",
     "            # The number of valid tokens must equal the requested count.\n",
     "            if len(rrr) != count:\n",
     "                g = 'out'\n",
     "            if g == 'out':\n",
     "                print('输入错误，剩余输入次数为:', (2 - i))\n",
     "                continue\n",
     "            else:\n",
     "                for m in rrr:\n",
     "                    print('input:', m)\n",
     "                    f.append(int(m))\n",
     "                break\n",
    "\n",
    "\n",
    "# open a xlsx file and read data\n",
    "def excel_read(p, select, f):\n",
    "    # pip install xlrd==1.2.0\n",
    "    exist(p)\n",
    "    print(f'打开：{p}')\n",
    "    data = xlrd.open_workbook(p)\n",
    "    table = data.sheets()[0]\n",
    "    lines = table.nrows\n",
    "    # select specific columns\n",
    "    if len(select) != 0:\n",
    "        head = table.row_values(0)\n",
    "        select_c = []\n",
    "        for i, j in enumerate(head):\n",
    "            if j in select:\n",
    "                select_c.append(i)\n",
    "                if len(select_c) == len(select):\n",
    "                    break\n",
    "        if len(select_c) != len(select):\n",
    "            print(f'文件{p}中未找到目标字段{select}，请确认！\\n')\n",
    "            select_c = human_choose(head, len(select))\n",
    "            print('选择列：', select_c)\n",
    "        f.append(select)\n",
    "        for i in range(lines-1):\n",
    "            row_s = []\n",
    "            for j in select_c:\n",
    "                row_s.append(table.cell_value(i+1, j))\n",
    "            f.append(row_s)\n",
    "    else:\n",
    "        for i in range(lines):\n",
    "            f.append(table.row_values(i + 1))\n",
    "    print(f'共读取到：{len(f)} 行数据（含首行）')\n",
    "    return\n",
    "\n",
    "\n",
    "# write data into txt\n",
    "def txt_write(path, data):\n",
    "    f = open(path, mode='a', encoding='utf8')\n",
    "    data_line = []\n",
    "\n",
    "    def list_split(d):\n",
    "        if type(d) in (list, tuple): \n",
    "            for m in d:\n",
    "                list_split(m)\n",
    "        else:\n",
    "            data_line.append(d)\n",
    "        return\n",
    "\n",
    "    if type(data) in (list, tuple):\n",
    "        for i in data:\n",
    "            list_split(i)\n",
    "            j = ''\n",
    "            for k in data_line:\n",
    "                j += ','+str(k)\n",
    "            f.write(j.lstrip('\"\",')+'\\n')\n",
    "            data_line.clear()\n",
    "    else:\n",
    "        f.write(data)\n",
    "    print('保存至：'+path)\n",
    "    f.close()\n",
    "\n",
     "# File must exist.\n",
     "# NOTE(review): this redefinition silently shadows the richer exist()\n",
     "# defined earlier in this cell (which also creates missing txt files and\n",
     "# rejects empty ones) - consider keeping only one of the two.\n",
     "def exist(p):\n",
     "    \"\"\"Prompt and exit the script if the path p does not exist.\"\"\"\n",
     "    if not os.path.exists(p):\n",
     "        input('\"{}\"文件不存在，请确认，任意键退出'.format(p))\n",
     "        exit()\n",
    "\n",
    "def txt_read(path):\n",
    "    \"\"\"read txt file and return a list\"\"\"\n",
    "    exist(path)\n",
    "    f = open(path, mode='r', encoding='utf8')\n",
    "    l = []\n",
    "    wrong = 0\n",
    "    for i in f.readlines():\n",
    "        j = i.strip('\\n')\n",
    "        if len(j) != 0:\n",
    "            l.append(j)\n",
    "        else:\n",
    "            wrong += 1\n",
    "    print('* Have read the file: {}; empty lines : {}'.format(path, wrong))\n",
    "    return l\n",
    "\n",
    "def excel_write(ph, data):\n",
    "    from math import ceil\n",
    "    # create a table and write in data\n",
    "    table = xlsxwriter.Workbook(ph)\n",
    "    # avoid the rows limit for xlsx:1048576\\xls:65536\n",
    "    table_type = ph.split('.')[-1]\n",
    "    if table_type == 'xlsx':\n",
    "        # ceil: 向上取整\n",
    "        sheet_num = ceil((len(data)-1)/1048000)\n",
    "    else:\n",
    "        sheet_num = int(len(data)/6500)\n",
    "    print(f'* Total Data:{len(data)} rows; Target Sheet:{sheet_num} sheets\\n* writing......')\n",
    "    for s in range(sheet_num):\n",
    "        sheet = table.add_worksheet(f'sheet{s}')\n",
    "        if table_type == 'xlsx':\n",
    "            id_begin = s * 1048000\n",
    "            id_end = 1048000 + id_begin\n",
    "        else:\n",
    "            id_begin = s * 6500\n",
    "            id_end = 6500 + id_begin\n",
    "        if id_end > len(data):\n",
    "            id_end = len(data)-1\n",
    "        # write head\n",
    "        sheet.write(0, 0, 'ID')\n",
    "        for k in range(len(data[0])):\n",
    "            sheet.write(0, k + 1, data[0][k])\n",
    "        # write data\n",
    "        for i in range(id_begin, id_end):\n",
    "            # write ID column\n",
    "            sheet.write(i-id_begin+1, 0, i + 1)\n",
    "            for j in range(len(data[i + 1])):\n",
    "                # 这是最宽泛的将数字、字符串分开的方法，不过有可能出错，可以考虑结合“判断是否为数字”的代码使用\n",
    "                try:\n",
    "                    sheet.write(i-id_begin+1, j + 1, data[i + 1][j])\n",
    "                except:\n",
    "                    sheet.write(i-id_begin+1, j + 1, str(data[i + 1][j]))\n",
    "        print(f'* Have finished:sheet{s}')\n",
    "    table.close()\n",
    "    print('###save as ' + ph)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# I. Read posts and save as json file for batch API"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Supported providers mapped to the model id used for batch requests.\n",
     "model_to_use = {'ChatGPT':'gpt-4o-mini', 'DeepSeek':'deepseek-ai/DeepSeek-V3'}\n",
     "# NOTE(review): model_selected is never assigned elsewhere in this notebook;\n",
     "# the submit cell hardcodes its model instead of reading these variables.\n",
     "model_selected = ''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Build post_list for both ChatGPT and DeepSeek: either plain prompts from a\n",
     "# txt file, or prompts assembled from Xiaohongshu spider Excel output.\n",
     "print('\\n* Please select data source')\n",
     "mode = human_choose(['load prompts from txt (general prompts, one prompt per line)', \n",
     "                    'load prompts from Xiaohongshu data (acquired by RedNoteSpider script)'], 1)[0]\n",
     "if mode == 0:\n",
     "    print('\\n* load prompts from prompts.txt (general prompts, one prompt per line)')\n",
     "    exist('prompts.txt')\n",
     "    post_list = txt_read('prompts.txt')\n",
     "else:\n",
     "    print('\\n* load prompts from Xiaohongshu data')\n",
     "    print('\\n* list all excel files in this folder')\n",
     "    p1 = os.getcwd()\n",
     "    p = file_name(p1, ['.xls', '.xlsx'])\n",
     "    # Let the user pick which workbook to load.\n",
     "    file_path = p[human_choose(p, 1)[0]]\n",
     "    print('\\n* read the sheet0 data')\n",
     "    info_targeted = ['ID', 'ip', 'post_date', 'author', 'title', 'content', 'comments_selected']\n",
     "    info_list = []\n",
     "    excel_read(file_path, info_targeted, info_list)\n",
     "    print('\\n* convert to prompts for selected ai model:\\nID: {ID} title: {title} posted by {author} on {post_date}  in {ip} with content: {content} with comments: {comments_selected}')\n",
     "    post_list = []\n",
     "    # info_list[0] is the header row, so data rows start at index 1.\n",
     "    for i in range(len(info_list)-1):\n",
     "        prompt = f'ID: {int(info_list[i+1][0])} title: {info_list[i+1][4]} posted by {info_list[i+1][3]} on {info_list[i+1][2]} in {info_list[i+1][1]} with content: {info_list[i+1][-2]} with comments: {info_list[i+1][-1]}'\n",
     "        post_list.append(prompt)\n",
     "    print(f'Save {len(post_list)} prompt(s) to submit')\n",
     "    dt = datetime.datetime.now().strftime('%d%m%Y-%H%M%S')\n",
     "    # NOTE(review): the prompt snapshot is only saved in this branch, not for\n",
     "    # mode 0 (txt input).\n",
     "    txt_write(f'prompts_{dt}.txt', post_list)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# II. Submit the batch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prepare batch - Same for both ChatGPT and DeepSeek\n",
    "prompt_in = 'prompt_your_requirement.txt'\n",
    "print(f'''\\n* Read your prompt (command for AI model to execute)\n",
    "      \\nPlease save all your requirements in {prompt_in}''')\n",
    "exist(prompt_in)\n",
    "# read the prompt\n",
    "f = open(prompt_in, mode='r', encoding='utf-8-sig')\n",
    "prompt_up = f.read()\n",
    "f.close()\n",
    "print('\\n* Received your requirements:\\n\\n', prompt_up)\n",
    "print('\\n* convert all your prompts and your requirements to a json file for the batch')\n",
    "requests = []\n",
    "for i in range(len(post_list)):\n",
    "    request = {\"custom_id\": f\"request-{i}\", \"method\": \"POST\", \"url\": \"/v1/chat/completions\", \n",
    "               \"body\": {\"model\": \"gpt-4o-mini\", \n",
    "                        \"messages\": [{\"role\": \"assistant\", \"content\": prompt_up},{\"role\": \"user\", \"content\": str(post_list[i])}]}}\n",
    "    requests.append(json.dumps(request))\n",
    "txt_write('batchinput.jsonl', requests)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Define API key\n",
    "ChatGPT: run this command in terminal: setx OPENAI_API_KEY \"your_api_key_here\"; restart IDE <br>\n",
    "DeepSeek: copy the API generated on https://docs.siliconflow.cn/"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "gpt_api = '''\\n* Please define ChatGPT API by\n",
    "      \\nrunning this code in any terminal: setx OPENAI_API_KEY \"your_api_key_here\"\n",
    "      \\nand then restart the script\n",
    "      \\n guidance:\n",
    "      a) win+R; \n",
    "      b) type in \\'cmd\\'; \n",
    "      c) paste the code provided above in the pop-up terminal , it is expected to see the result: \\'SUCCESS: Specified value was saved.\\''''\n",
    "try:\n",
    "    from openai import OpenAI\n",
    "    client = OpenAI()\n",
    "except:\n",
    "     input(gpt_api)\n",
    "     exit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# DeepSeek\n",
    "from openai import OpenAI\n",
    "client = OpenAI(\n",
    "    api_key=\"\", \n",
    "    base_url=\"https://api.siliconflow.cn/v1\"\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# upload the batch\n",
    "def dict_extract(content, prefix=\"\"):\n",
    "    \"\"\"\n",
    "    Recursively extracts information from a nested dictionary.\n",
    "    \n",
    "    Args:\n",
    "        content (dict): The dictionary to process.\n",
    "        prefix (str): Prefix for lowest level of keys to save as headings.\n",
    "\n",
    "    Returns:\n",
    "        list: A list of tuples containing headings and corresponding cell values.\n",
    "    \"\"\"\n",
    "    data = []\n",
    "    for key, value in content.items():\n",
    "        current_prefix = f\"{prefix}_{key}\" if prefix else key\n",
    "        if isinstance(value, dict):\n",
    "            data.extend(dict_extract(value, current_prefix))\n",
    "        else:\n",
    "            data.append((current_prefix.split('_')[-1], value))\n",
    "    return data # [('Post ID', '140'), ('Relevance', 'No'), ...]\n",
    "\n",
     "# Upload the batch input file to the provider's file storage.\n",
     "# NOTE(review): the filename is a hardcoded timestamped artifact from an\n",
     "# earlier run; the prepare cell writes 'batchinput.jsonl' - confirm which\n",
     "# file should be uploaded.\n",
     "print('\\n* Uploading the file to ChatGPT...')\n",
     "batch_input_file = client.files.create(\n",
     "    file=open(\"batchinput_06022025-193647.jsonl\", \"rb\"),\n",
     "    purpose=\"batch\"\n",
     "    )\n",
     "print(batch_input_file)\n",
     "# file_id = batch_input_file.data['id'] # deepseek"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "* Start the batch\n"
     ]
    }
   ],
   "source": [
    "print('\\n* Start the batch')\n",
    "# start the batch\n",
    "batch_task = client.batches.create(\n",
    "    #input_file_id=batch_input_file.id,\n",
    "    input_file_id = 'file-dl4nh6pvi5pxrle',\n",
    "    endpoint=\"/v1/chat/completions\",\n",
    "    completion_window=\"24h\",\n",
    "    metadata={\n",
    "        \"description\": \"nightly eval job\"\n",
    "    },\n",
    "    extra_body={\"replace\": {\"model\": \"deepseek-ai/DeepSeek-V3\"}}\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# III. Check batch status"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print('\\n* Checking the batch status...')\n",
    "batch_id = batch_task.id # output from previous step\n",
    "batch = client.batches.retrieve(batch_id)\n",
    "if batch.status == 'completed':\n",
    "    print('output_file_id = ', batch.output_file_id)\n",
    "else:\n",
    "    print(batch.status)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Display the full batch object returned by the API for inspection.\n",
     "batch"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# IV. Retrieve results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "True"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# DeepSeek - download the outputs; manually rename the file to 'results.jsonl'\n",
     "# and move it into this folder before running the next cell.\n",
     "# NOTE(review): assumes batch.output_file_id is an openable download URL for\n",
     "# this provider - confirm.\n",
     "import webbrowser\n",
     "webbrowser.open(batch.output_file_id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "responses = []\n",
    "with open(\"results.jsonl\", \"r\", encoding='utf-8') as file:\n",
    "    for line in file:\n",
    "        responses.append(line)\n",
    "print(responses)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# ChatGPT: fetch the batch output file contents directly through the API.\n",
     "print('\\n* Retrieving batch results...')\n",
     "output_file_id = batch.output_file_id\n",
     "file_response = client.files.content(output_file_id)\n",
     "# Keep a raw on-disk copy before any parsing.\n",
     "txt_write('results_raw.txt', file_response.text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# save responses as list - ChatGPT\n",
    "responses = file_response.text.rstrip('\\n').split('\\n')\n",
    "print(len(responses), 'response(s)\\n' )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "print('\\n* Format and save batch results as excel file...')\n",
     "\n",
     "# Accumulators: results_dict maps heading -> {post_id: value}.\n",
     "results_dict = {'created_time':{}, 'model_used':{}}\n",
     "results_list = []\n",
     "# NOTE(review): this initial value is unused - headings_final is rebuilt\n",
     "# from results_dict.keys() after the extraction loop below.\n",
     "headings_final = ['created_time', 'model_used']\n",
     "\n",
     "# Extract key information from each raw response line.\n",
     "# NOTE(review): the loop variable i is rebound (json.loads result, then the\n",
     "# inner index loop); harmless in Python but easy to misread.\n",
     "for i in responses:\n",
     "    i = json.loads(i)\n",
     "   \n",
     "    timestamp = int(i['response']['body']['created'])\n",
     "    created_time = datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')\n",
     "\n",
     "    model_used = i['response']['body']['model']    \n",
     "\n",
     "    # Extract the first {...} JSON object embedded in the model's reply text.\n",
     "    content_raw = i['response']['body']['choices'][0]['message']['content']\n",
     "    match = re.search(r'{.*}', content_raw, re.DOTALL)\n",
     "    content = json.loads(match.group(0))\n",
     "    headings, cells = zip(*dict_extract(content)) # ['Post ID', 'Relevance', ...], ['140', 'No', ...]\n",
     "    \n",
     "    # Index everything by post id.\n",
     "    # NOTE(review): assumes the first extracted pair is the post id - depends\n",
     "    # on the prompt's required output format; confirm.\n",
     "    post_id = str(cells[0])\n",
     "    results_dict['created_time'][post_id] = created_time\n",
     "    results_dict['model_used'][post_id] = model_used\n",
     "    for i in range(len(headings)):\n",
     "        heading = headings[i]\n",
     "        if heading not in results_dict.keys():          \n",
     "            results_dict[str(heading)] = {}    \n",
     "        results_dict[str(heading)][post_id] = cells[i] # {'created_time':{}, 'model_used':{}, 'Post ID': {'140': '140', ...}, 'Relevance': {'140': 'No'...}, ...}\n",
     "\n",
     "# Convert results_dict to a rectangular list of rows for exporting.\n",
     "headings_final = list(results_dict.keys()) # ['created_time', 'model_used', 'Post ID', 'Relevance', ...]\n",
     "results_list.append(headings_final)\n",
     "for post_id in results_dict['Post ID'].keys():\n",
     "    # Collect this post's value for every heading ('' when missing).\n",
     "    post = []\n",
     "    for i in range(len(headings_final)):         \n",
     "        info_key = headings_final[i]\n",
     "        info_list = results_dict[info_key]\n",
     "        if str(post_id) in info_list:\n",
     "            info = results_dict[info_key][str(post_id)]\n",
     "        else:\n",
     "            info = ''  \n",
     "        post.append(info)\n",
     "    results_list.append(post)\n",
     "\n",
     "# save as excel file\n",
     "dt = datetime.datetime.now().strftime('%d%m%Y-%H%M%S')\n",
     "excel_write(f'results_{dt}.xlsx', results_list)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
