{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import re\n",
    "import random\n",
    "from tqdm import tqdm\n",
    "from typing import Optional, Dict, Sequence, List\n",
    "# import argparse\n",
    "\n",
    "def extract_last_num(text: str) -> float:\n",
    "    \"\"\"Return the last number that appears in ``text`` as a float.\n",
    "\n",
    "    Thousands separators are stripped first (e.g. \"123,456\" -> \"123456\").\n",
    "    Returns 0.0 when the text contains no number at all.\n",
    "    NOTE(review): a leading minus sign is not captured, so negative numbers\n",
    "    lose their sign — confirm downstream tasks never produce negatives.\n",
    "    \"\"\"\n",
    "    text = re.sub(r\"(\\d),(\\d)\", \"\\g<1>\\g<2>\", text)  # drop commas inside numbers like 123,456\n",
    "    res = re.findall(r\"(\\d+(\\.\\d+)?)\", text)  # match integers or decimals like 123456.789\n",
    "    if len(res) > 0:\n",
    "        num_str = res[-1][0]  # group 0 of the last match is the full number; group 1 is only the decimal part\n",
    "        return float(num_str)\n",
    "    else:\n",
    "        return 0.0\n",
    "    \n",
    "def check(key, truth, predict):\n",
    "    \"\"\"Judge whether model output ``predict`` matches gold answer ``truth`` for task ``key``.\n",
    "\n",
    "    - yes/no tasks ('cycle', 'connectivity', 'hamilton', 'substructure', 'bipartite'):\n",
    "      compare the yes/no token after the final '###' marker, falling back to the\n",
    "      last yes/no mention anywhere in ``predict`` when the marker is absent.\n",
    "    - numeric tasks ('flow', 'shortest', 'triplet'): compare the last numbers with\n",
    "      an absolute tolerance of 1e-2.\n",
    "    - 'topology': mutual substring containment of the text after '###', or, without\n",
    "      a marker, any comma-separated gold token appearing in ``predict``.\n",
    "\n",
    "    Returns True/False; unrecognized task keys now return False explicitly\n",
    "    (previously the function fell through and returned None implicitly).\n",
    "    \"\"\"\n",
    "    if key in ['cycle', 'connectivity', 'hamilton', 'substructure', 'bipartite']:\n",
    "        if '###' in predict:\n",
    "            # Only look at the text after the final '###' answer marker.\n",
    "            if 'yes' in truth.lower() and 'yes' in predict.split('###')[-1].lower():\n",
    "                return True\n",
    "            elif 'no' in truth.lower() and 'no' in predict.split('###')[-1].lower():\n",
    "                return True\n",
    "            return False\n",
    "        else:\n",
    "            # No marker: take the last yes/no mention in the prediction.\n",
    "            matches = re.findall(r'(yes|no)', predict, flags=re.IGNORECASE)\n",
    "            if matches:\n",
    "                last_match = matches[-1].lower()\n",
    "                if last_match == 'yes' and 'yes' in truth.lower():\n",
    "                    return True\n",
    "                elif last_match == 'no' and 'no' in truth.lower():\n",
    "                    return True\n",
    "            return False\n",
    "\n",
    "    elif key in ['flow', 'shortest', 'triplet']:\n",
    "        # Numeric answers: compare the trailing numbers with a small tolerance.\n",
    "        t_num = extract_last_num(truth)\n",
    "        p_num = extract_last_num(predict.split('###')[-1])\n",
    "        return abs(t_num - p_num) < 1e-2\n",
    "\n",
    "    elif key == 'topology':\n",
    "        if '###' in predict:\n",
    "            pre = predict.split('###')[-1].strip(' ')\n",
    "            truth = truth.split('###')[-1].strip(' ')\n",
    "            # Either side may carry extra framing text, so test containment both ways.\n",
    "            return truth in pre or pre in truth\n",
    "        else:\n",
    "            # No marker: accept if any comma-separated gold token appears in the prediction.\n",
    "            truth = truth.split('###')[-1].split(',')\n",
    "            for t in truth:\n",
    "                if t in predict or t.strip(' ') in predict:\n",
    "                    return True\n",
    "            return False\n",
    "\n",
    "    # Unknown task key: be explicit instead of falling through with None.\n",
    "    return False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the GPT-4 sampled outputs (JSONL: one JSON record per line) and split\n",
    "# them into verified graph samples vs. math samples.\n",
    "with open('/cpfs/user/chennuo/CN/Graph_RFT_Data/gpt4data/gpt4_gsm8knlg_sample3_v2_output.json') as f:\n",
    "    datas = f.readlines()\n",
    "print(len(datas))\n",
    "\n",
    "tasks = ['cycle', 'connectivity', 'hamilton', 'substructure', 'bipartite', 'flow', 'shortest', 'triplet', 'topology']\n",
    "# Per-task unique queries whose answer passed check(), and all unique queries seen.\n",
    "correct_samples  = {task:[] for task in tasks}\n",
    "all_samples  = {task:[] for task in tasks}\n",
    "temp = 0  # count of records routed to math_samples\n",
    "select_samples = []  # full records kept for the graph output file\n",
    "math_samples = []  # records whose task id is '4'..'9' (handled separately below)\n",
    "for data in datas:\n",
    "    \n",
    "    data = json.loads(data)\n",
    "        \n",
    "    task = data['task']\n",
    "    \n",
    "    # Numeric-string task ids are set aside untouched (written to the math file).\n",
    "    if task in ['4','5', '6', '7', '8','9']:\n",
    "        temp += 1\n",
    "        math_samples.append(data)\n",
    "        continue\n",
    "    # Keep a record only if the answer is judged correct and the response is\n",
    "    # long enough (>= 15 chars) to plausibly contain reasoning.\n",
    "    if check(task, data['response'], data['dv3_response']) and len(data['dv3_response']) >=15:\n",
    "        if data['query'] not in  correct_samples[task]:\n",
    "            correct_samples[task].append(data['query'])\n",
    "            select_samples.append(data)\n",
    "            \n",
    "    # Track every unique query per task (denominator for accuracy later).\n",
    "    if data['query'] not in all_samples[task]:\n",
    "        all_samples[task].append(data['query'])\n",
    "        \n",
    "# Persist both splits as JSONL.\n",
    "with open('/cpfs/user/chennuo/CN/Graph_RFT_Data/gpt4data/gpt4_generate_nlg_v2.json', 'w' ) as writer:\n",
    "    for sample in select_samples:\n",
    "        writer.write(json.dumps(sample, ensure_ascii=False) + '\\n')\n",
    "\n",
    "\n",
    "with open('/cpfs/user/chennuo/CN/Graph_RFT_Data/gpt4data/gpt4_generate_math_v2.json', 'w' ) as writer:\n",
    "    for sample in math_samples:\n",
    "        writer.write(json.dumps(sample, ensure_ascii=False) + '\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Peek at the first two math records set aside by the previous cell.\n",
    "math_samples[:2]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of unique queries observed per graph task.\n",
    "for task_name, queries in all_samples.items():\n",
    "    print(task_name)\n",
    "    print(len(queries))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Total number of selected (verified) graph records.\n",
    "len(select_samples)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-task accuracy: unique correct queries over unique queries seen.\n",
    "for task_name, correct in correct_samples.items():\n",
    "    seen = all_samples[task_name]\n",
    "    if not seen:\n",
    "        continue\n",
    "    print(task_name)\n",
    "    print(len(correct) / len(seen))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect the last correct 'triplet' query.\n",
    "correct_samples['triplet'][-1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "import json\n",
    "# Re-load the selected graph samples and distill each model output into a\n",
    "# clean chain-of-thought field.\n",
    "with open('/cpfs/user/chennuo/CN/Graph_RFT_Data/gpt4data/gpt4_generate_nlg_v2.json') as f:\n",
    "    datas = f.readlines()\n",
    "refine_datas = []\n",
    "temp = 0  # count of records dropped because the response looks like code\n",
    "for data in datas:\n",
    "    data = json.loads(data)\n",
    "    new_data = dict()\n",
    "    new_data['query'] = data['query']\n",
    "    new_data['task'] = data['task']\n",
    "    new_data['response'] = data['response']\n",
    "    # Keep only the final paragraph after the last 'A:' marker.\n",
    "    response = data['dv3_response'].split('A:')[-1].split('\\n\\n')[-1]\n",
    "    # If the CoT lacks the '###' answer marker, append the gold answer\n",
    "    # (note: concatenated directly, with no separating space).\n",
    "    if '###' not in response:\n",
    "        response +=  data['response'] + '.'\n",
    "    # Drop responses that contain code tokens instead of natural-language reasoning.\n",
    "    if 'print' in response or 'return' in response or 'def' in response:\n",
    "        temp += 1\n",
    "        continue\n",
    "    new_data['CoT_response'] = response\n",
    "    refine_datas.append(new_data)\n",
    "\n",
    "# Persist the refined records as JSONL.\n",
    "with open('/cpfs/user/chennuo/CN/Graph_RFT_Data/gpt4data/graph_source_data_v2.json', 'w' ) as writer:\n",
    "    for sample in refine_datas:\n",
    "        writer.write(json.dumps(sample, ensure_ascii=False) + '\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "# Spot-check one randomly chosen refined record.\n",
    "random.sample(refine_datas, 10)[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Alpaca-style prompt templates used to wrap each query.\n",
    "PROMPT_DICT = {\n",
    "    \"prompt_input\": (\n",
    "        \"Below is an instruction that describes a task, paired with an input that provides further context. \"\n",
    "        \"Write a response that appropriately completes the request.\\n\\n\"\n",
    "        \"### Instruction:\\n{instruction}\\n\\n### Input:\\n{input}\\n\\n### Response:\"\n",
    "    ),\n",
    "    \"prompt_no_input\": (\n",
    "        \"Below is an instruction that describes a task. \"\n",
    "        \"Write a response that appropriately completes the request.\\n\\n\"\n",
    "        \"### Instruction:\\n{instruction}\\n\\n### Response:\"\n",
    "    ),\n",
    "}\n",
    "\n",
    "\n",
    "# Emit preference-format records: prompt + chosen CoT + a dummy rejection.\n",
    "# NOTE(review): key is spelled 'reject' (not 'rejected') — confirm the consumer\n",
    "# of this file expects that spelling.\n",
    "with open('/cpfs/user/chennuo/CN/Graph-Reasoning-LLM/datasets/data/graph_v2_dsformat.json', 'w' ) as writer:\n",
    "    for sample in refine_datas:\n",
    "        new_sample = dict()\n",
    "        new_sample['prompt'] = PROMPT_DICT[\"prompt_no_input\"].format(instruction=sample['query'])\n",
    "        new_sample['chosen'] = sample['CoT_response']\n",
    "        new_sample['reject'] = 'I do not know'\n",
    "        writer.write(json.dumps(new_sample, ensure_ascii=False) + '\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch demo: extract the final yes/no verdict from free-form text.\n",
    "import re\n",
    "\n",
    "text = \"This is a sample string with YES and no, as well as YES.\"\n",
    "\n",
    "# Collect every case-insensitive 'yes'/'no' occurrence, then keep the last one.\n",
    "hits = re.findall(r'(yes|no)', text, flags=re.IGNORECASE)\n",
    "\n",
    "if not hits:\n",
    "    print(\"No 'yes' or 'no' found in the text.\")\n",
    "else:\n",
    "    verdict = hits[-1].lower()\n",
    "    if verdict == 'yes':\n",
    "        print('yes')\n",
    "    elif verdict == 'no':\n",
    "        print('no')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import json\n",
    "import random\n",
    "\n",
    "# Shuffle every JSONL training file, tagging each record with its original\n",
    "# position first so the pre-shuffle order can be recovered downstream.\n",
    "input_folder = '/cpfs/user/chennuo/CN/Graph-Reasoning-LLM/datasets/train_set'\n",
    "output_folder = '/cpfs/user/chennuo/CN/Graph-Reasoning-LLM/datasets/train_set_shuffle'\n",
    "\n",
    "# Create the output folder if it doesn't exist.\n",
    "os.makedirs(output_folder, exist_ok=True)\n",
    "\n",
    "# One JSON object per line in each .json file.\n",
    "json_files = [name for name in os.listdir(input_folder) if name.endswith('.json')]\n",
    "\n",
    "for name in json_files:\n",
    "    with open(os.path.join(input_folder, name)) as f:\n",
    "        lines = f.readlines()\n",
    "\n",
    "    records = [json.loads(line) for line in lines]\n",
    "    # Record the original position before shuffling.\n",
    "    for i, sample in enumerate(records):\n",
    "        sample['sample_id'] = i\n",
    "\n",
    "    # NOTE(review): random is unseeded here, so the shuffle order is not\n",
    "    # reproducible across runs — confirm that is acceptable.\n",
    "    random.shuffle(records)\n",
    "\n",
    "    # Write the shuffled records as JSONL under the same file name.\n",
    "    output_file = os.path.join(output_folder, name)\n",
    "    with open(output_file, 'w') as f:\n",
    "        for new_sample in records:\n",
    "            f.write(json.dumps(new_sample, ensure_ascii=False) + '\\n')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch cell: quick int-to-string check.\n",
    "str(5)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
