{"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[]},"gpuClass":"standard","kernelspec":{"name":"python3","display_name":"Python 3","language":"python"},"language_info":{"name":"python","version":"3.10.13","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":7571253,"sourceType":"datasetVersion","datasetId":4407676},{"sourceId":7678915,"sourceType":"datasetVersion","datasetId":4479814},{"sourceId":7713636,"sourceType":"datasetVersion","datasetId":4504654},{"sourceId":7964016,"sourceType":"datasetVersion","datasetId":4685329},{"sourceId":8017122,"sourceType":"datasetVersion","datasetId":4723613}],"dockerImageVersionId":30683,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"

Benchmark 2: dolphin-2.2.1-mistral-7b

\n","metadata":{}},{"cell_type":"markdown","source":"
\n

\n Notebook Goal\n

\n

\nThe objective of this notebook is to evaluate the performance of dolphin-2.2.1-mistral-7b and OpenHermes on the Table-extract benchmark dataset available on Hugging Face.

\n
\n","metadata":{}},{"cell_type":"markdown","source":"#
Table of Contents
\n\n* [I. Loading and Importing Libraries](#1)\n* [II. Definition and Implementation of Metrics](#2)\n* [III. Clean Response Obtained by LLM](#3)\n* [IV. Data Preparation](#5)\n* [V. Benchmark](#6)\n * [Prompt](#61)\n * [dolphin-2.2.1-mistral-7b](#62)","metadata":{}},{"cell_type":"markdown","source":"\n#
I | Loading and Importing Libraries
\n","metadata":{}},{"cell_type":"code","source":"%%capture\n!pip install google-generativeai\n!pip install --upgrade pip\n!pip install bitsandbytes\n!pip install transformers","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:24:27.841516Z","iopub.execute_input":"2024-04-17T13:24:27.842129Z","iopub.status.idle":"2024-04-17T13:25:43.177008Z","shell.execute_reply.started":"2024-04-17T13:24:27.842064Z","shell.execute_reply":"2024-04-17T13:25:43.175675Z"},"trusted":true},"execution_count":1,"outputs":[]},{"cell_type":"code","source":"import re\nimport json\nfrom tqdm import tqdm\nimport pandas as pd\nfrom datasets import load_dataset, Dataset\nfrom wand.image import Image as WImage\nimport torch\nimport pandas as pd\nfrom transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig\nimport time \nimport random\nimport numpy as np","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:25:43.179621Z","iopub.execute_input":"2024-04-17T13:25:43.180056Z","iopub.status.idle":"2024-04-17T13:25:52.333635Z","shell.execute_reply.started":"2024-04-17T13:25:43.180013Z","shell.execute_reply":"2024-04-17T13:25:52.332414Z"},"trusted":true},"execution_count":2,"outputs":[]},{"cell_type":"code","source":"import google.generativeai as genai\nimport time \ngenai.configure(api_key=\"AIzaSyAhz9UBzkEIYI886zZRm40qqB1Kd_9Y4-0\")","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:25:52.335375Z","iopub.execute_input":"2024-04-17T13:25:52.335913Z","iopub.status.idle":"2024-04-17T13:25:53.003314Z","shell.execute_reply.started":"2024-04-17T13:25:52.335882Z","shell.execute_reply":"2024-04-17T13:25:53.002372Z"},"trusted":true},"execution_count":3,"outputs":[]},{"cell_type":"code","source":"# Set random seed for reproducibility\nrandom.seed(42)\nnp.random.seed(42)\ntorch.manual_seed(42)\ntorch.cuda.manual_seed_all(42)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False","metadata":{"execution":{"iopub.status.busy":"2024-04-17T11:25:06.673288Z","iopub.execute_input":"2024-04-17T11:25:06.674130Z","iopub.status.idle":"2024-04-17T11:25:06.681886Z","shell.execute_reply.started":"2024-04-17T11:25:06.674097Z","shell.execute_reply":"2024-04-17T11:25:06.681086Z"},"trusted":true},"execution_count":4,"outputs":[]},{"cell_type":"markdown","source":"\n#
II | Definition and Implementation of Metrics
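\nTwo metrics are reported: the share of gold keys the model reproduces, and the share of gold values it reproduces exactly. Before the full implementations below, here is a minimal sketch of the idea for a single gold/predicted record pair, using plain Python sets; the record contents are made up for illustration and are not part of the benchmark data.

```python
# Hedged sketch: per-record key/value overlap for one gold vs. predicted record.
gold = {"aircraft": "robinson r - 22", "description": "light utility helicopter"}
pred = {"aircraft": "robinson r - 22", "description": "light helicopter"}

# Percentage of predicted keys: correctly predicted keys / number of true keys.
key_score = len(set(gold) & set(pred)) / len(gold)

# Percentage of predicted values: exact matches / number of true values.
value_score = sum(str(pred.get(k)) == str(v) for k, v in gold.items()) / len(gold)

print(key_score, value_score)  # 1.0 0.5
```

The functions defined in this section generalize this per-record computation to whole lists of records and average the scores.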
\nSo, let's begin by providing an example of the example output.","metadata":{}},{"cell_type":"code","source":"desired_output = [{'aircraft': 'robinson r - 22',\n 'description': 'light utility helicopter',\n 'max gross weight': '1370 lb (635 kg)',\n 'total disk area': '497 ft square (46.2 m square)',\n 'max disk loading': '2.6 lb / ft square (14 kg / m square)'},\n {'aircraft': 'bell 206b3 jetranger',\n 'description': 'turboshaft utility helicopter',\n 'max gross weight': '3200 lb (1451 kg)',\n 'total disk area': '872 ft square (81.1 m square)',\n 'max disk loading': '3.7 lb / ft square (18 kg / m square)'},\n {'aircraft': 'ch - 47d chinook',\n 'description': 'tandem rotor helicopter',\n 'max gross weight': '50000 lb (22680 kg)',\n 'total disk area': '5655 ft square (526 m square)',\n 'max disk loading': '8.8 lb / ft square (43 kg / m square)'},\n {'aircraft': 'mil mi - 26',\n 'description': 'heavy - lift helicopter',\n 'max gross weight': '123500 lb (56000 kg)',\n 'total disk area': '8495 ft square (789 m square)',\n 'max disk loading': '14.5 lb / ft square (71 kg / m square)'},\n {'aircraft': 'ch - 53e super stallion',\n 'description': 'heavy - lift helicopter',\n 'max gross weight': '73500 lb (33300 kg)',\n 'total disk area': '4900 ft square (460 m square)',\n 'max disk loading': '15 lb / ft square (72 kg / m square)'}]\n","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:26:10.848038Z","iopub.execute_input":"2024-04-17T13:26:10.849306Z","iopub.status.idle":"2024-04-17T13:26:10.856880Z","shell.execute_reply.started":"2024-04-17T13:26:10.849271Z","shell.execute_reply":"2024-04-17T13:26:10.855830Z"},"trusted":true},"execution_count":4,"outputs":[]},{"cell_type":"markdown","source":"To compare between the expected list of records and the predicted list of records, we first need to verify the percentage of predicted keys relative to the desired keys","metadata":{}},{"cell_type":"markdown","source":">## Percentage of predicted keys","metadata":{}},{"cell_type":"markdown","source":"Let's begin by defining a function to retrieve all keys of record","metadata":{}},{"cell_type":"code","source":"def get_keys(d):\n # Iterate over each key-value pair in the dictionary\n for k, v in d.items():\n # Append the key to the list of all_keys\n all_keys.append(k)\n # If the value is a dictionary, recursively call get_keys\n if isinstance(v, dict):\n get_keys(v)\n # If the value is a list, iterate over each item\n elif isinstance(v, list):\n for item in v:\n # If the item is a dictionary, recursively call get_keys\n if isinstance(item, dict):\n get_keys(item)\n# Define a function to retrieve all unique keys from a nested dictionary\ndef get_all_keys(d):\n # Declare all_keys as a global variable\n global all_keys\n # Initialize all_keys as an empty list\n all_keys = []\n # Call the helper function get_keys to populate all_keys\n get_keys(d)\n # Return a list containing the unique keys by converting all_keys to a set and then back to a list\n return list(set(all_keys))","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:26:13.106092Z","iopub.execute_input":"2024-04-17T13:26:13.106999Z","iopub.status.idle":"2024-04-17T13:26:13.117009Z","shell.execute_reply.started":"2024-04-17T13:26:13.106957Z","shell.execute_reply":"2024-04-17T13:26:13.115623Z"},"trusted":true},"execution_count":5,"outputs":[]},{"cell_type":"code","source":"# Testing our 
function\nget_all_keys(desired_output[0])","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:26:15.883427Z","iopub.execute_input":"2024-04-17T13:26:15.883817Z","iopub.status.idle":"2024-04-17T13:26:15.891542Z","shell.execute_reply.started":"2024-04-17T13:26:15.883783Z","shell.execute_reply":"2024-04-17T13:26:15.890415Z"},"trusted":true},"execution_count":6,"outputs":[{"execution_count":6,"output_type":"execute_result","data":{"text/plain":"['max gross weight',\n 'aircraft',\n 'description',\n 'total disk area',\n 'max disk loading']"},"metadata":{}}]},{"cell_type":"markdown","source":"Now, we define the percentage of predicted keys as follows:\n\n$$\\Large \\text{Percentage of predicted keys} = \\frac{\\text{Number of correctly predicted keys}}{\\text{Total number of true keys}}$$\nThis percentage is calculated for every record in the list, then summed and divided by the number of records in the list.","metadata":{}},{"cell_type":"code","source":"def process_dict(data):\n if isinstance(data, dict):\n for key, value in data.items():\n if isinstance(value, str):\n data[key] = value.strip().lower()\n elif isinstance(value, list):\n data[key] = [process_dict(item) for item in value]\n elif isinstance(value, dict):\n data[key] = process_dict(value)\n return data","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:26:18.834048Z","iopub.execute_input":"2024-04-17T13:26:18.834988Z","iopub.status.idle":"2024-04-17T13:26:18.841331Z","shell.execute_reply.started":"2024-04-17T13:26:18.834951Z","shell.execute_reply":"2024-04-17T13:26:18.840255Z"},"trusted":true},"execution_count":7,"outputs":[]},{"cell_type":"code","source":"def percentage_of_predicted_keys(true_dic, pred_dic):\n true_dic=process_dict(true_dic)\n pred_dic=process_dict(pred_dic)\n # Get all keys of the true dictionary\n all_keys_of_true_dic = get_all_keys(true_dic)\n # Get all keys of the predicted dictionary\n all_keys_of_pred_dic = get_all_keys(pred_dic)\n \n # Check if there are no keys in the true dictionary to avoid division by zero\n if len(all_keys_of_true_dic) == 0:\n return 0 # Avoid division by zero\n \n # Initialize count of predicted keys\n p_keys = 0\n # Iterate through all keys in the predicted dictionary\n for key in all_keys_of_pred_dic:\n # Check if the key is also present in the true dictionary\n if key in all_keys_of_true_dic:\n # Increment count if the key is found in both dictionaries\n p_keys += 1\n \n # Calculate the percentage of predicted keys compared to true keys\n p_keys /= len(all_keys_of_true_dic)\n # Return the percentage of predicted keys\n return p_keys","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:26:20.390744Z","iopub.execute_input":"2024-04-17T13:26:20.391436Z","iopub.status.idle":"2024-04-17T13:26:20.398265Z","shell.execute_reply.started":"2024-04-17T13:26:20.391403Z","shell.execute_reply":"2024-04-17T13:26:20.397237Z"},"trusted":true},"execution_count":8,"outputs":[]},{"cell_type":"code","source":"def average_percentage_key(true_list, pred_list):\n min_length = min(len(true_list), len(pred_list)) # Find the minimum length of the two lists\n score = 0\n for i in range(min_length):\n score += percentage_of_predicted_keys(true_list[i], pred_list[i])\n return score / 
len(true_list)","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:26:22.657056Z","iopub.execute_input":"2024-04-17T13:26:22.657871Z","iopub.status.idle":"2024-04-17T13:26:22.663217Z","shell.execute_reply.started":"2024-04-17T13:26:22.657838Z","shell.execute_reply":"2024-04-17T13:26:22.662241Z"},"trusted":true},"execution_count":9,"outputs":[]},{"cell_type":"code","source":"# Example true and predicted lists\ntrue_list = [{'key1': 1, 'key2': 2, 'key3': 3}, {'key1': 4, 'key2': 5, 'key3': 6}, {'key1': 7, 'key2': 8, 'key3': 9}]\npred_list = [{'key1': 1, 'key2': 2, 'key3': 3}, {'key1': 4, 'key2': 5, 'key3': 7}, {'key1': 7, 'key2': 8, 'key3': 9}]\n\n# Test the function\nresult = average_percentage_key(true_list, pred_list)\nprint(\"Average percentage of keys:\", result)","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:26:23.851951Z","iopub.execute_input":"2024-04-17T13:26:23.852443Z","iopub.status.idle":"2024-04-17T13:26:23.859938Z","shell.execute_reply.started":"2024-04-17T13:26:23.852412Z","shell.execute_reply":"2024-04-17T13:26:23.858744Z"},"trusted":true},"execution_count":10,"outputs":[{"name":"stdout","text":"Average percentage of keys: 1.0\n","output_type":"stream"}]},{"cell_type":"markdown","source":"Now we will define the principal metrics used to compare the values of two list recods.","metadata":{}},{"cell_type":"markdown","source":">## Percentage of predicted values\n\nThe function calculates the percentage of correctly predicted values compared to the total number of true values across different types of data structures.\n\nThe formula for calculating the percentage of values is as follows:\n\n$$\n\\text{Average percentage of values} = \\frac{\\sum_{i=1}^{\\text{Total number of records}} p_i }{Total number of records}\n$$\n\nHere, $p_i$ represents the percentage of correctly predicted values for each key. 
It's calculated as:\n\n$$p_i = \\frac{\\text{Number of correctly predicted values of item i}}{\\text{Total number of true values of item i}}$$","metadata":{}},{"cell_type":"code","source":"def calculate_percentage_of_values(true_dic, pred_dic):\n total_percentage = 0 # Initialize total percentage\n # Type 1: Single string values\n for key, true_value in true_dic.items(): # Loop through key-value pairs in true_dic\n \n # Check if the key exists in pred_dic, if its value is a string and if it matches the true value\n if key in pred_dic and str(pred_dic[key]) == str(true_value):\n match = 1 # Assign perfect match\n else:\n match = 0 # Assign no match\n total_percentage += match\n return total_percentage / len(true_dic) # Calculate and return the average percentage","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:27:07.067268Z","iopub.execute_input":"2024-04-17T13:27:07.068049Z","iopub.status.idle":"2024-04-17T13:27:07.074330Z","shell.execute_reply.started":"2024-04-17T13:27:07.068016Z","shell.execute_reply":"2024-04-17T13:27:07.073145Z"},"trusted":true},"execution_count":11,"outputs":[]},{"cell_type":"code","source":"def average_percentage_value(true_list, pred_list):\n min_length = min(len(true_list), len(pred_list)) # Find the minimum length of the two lists\n score = 0\n for i in range(min_length):\n score += calculate_percentage_of_values(true_list[i], pred_list[i])\n return score / len(true_list)","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:27:09.111778Z","iopub.execute_input":"2024-04-17T13:27:09.112434Z","iopub.status.idle":"2024-04-17T13:27:09.118149Z","shell.execute_reply.started":"2024-04-17T13:27:09.112400Z","shell.execute_reply":"2024-04-17T13:27:09.117126Z"},"trusted":true},"execution_count":12,"outputs":[]},{"cell_type":"code","source":"# Example true and predicted lists\ntrue_list = [{'key1': 1, 'key2': 2, 'key3': 3}, {'key1': 4, 'key2': 5, 'key3': 6}, {'key1': 7, 'key2': 8, 'key3': 9}]\npred_list = [{'key1': 1, 'key2': 2, 'key3': 3}, {'key1': 4, 'key2': 5, 'key3': 7}, {'key1': 7, 'key2': 8, 'key3': 9}]\n\n# Test the function\nresult = average_percentage_value(true_list, pred_list)\nprint(\"Average percentage of keys:\", result)","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:27:10.867077Z","iopub.execute_input":"2024-04-17T13:27:10.867514Z","iopub.status.idle":"2024-04-17T13:27:10.874729Z","shell.execute_reply.started":"2024-04-17T13:27:10.867480Z","shell.execute_reply":"2024-04-17T13:27:10.873519Z"},"trusted":true},"execution_count":13,"outputs":[{"name":"stdout","text":"Average percentage of keys: 0.8888888888888888\n","output_type":"stream"}]},{"cell_type":"markdown","source":"\n#
III | Clean Response Obtained by LLM
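\nModels rarely return bare JSON: the list of records usually arrives wrapped in prose, markdown fences, or both. The `parse_json` helper defined below therefore keeps only the span between the first `{` and the last `}`, wraps it in `[...]`, strips any fenced ` ```json ` markers, and only then calls `json.loads`. A minimal illustration of that slicing idea on a made-up raw response (the response text is an assumption for demonstration only):

```python
import json

# Made-up raw model response: the JSON record is buried in surrounding prose.
raw = ('Sure! Here is the extracted record:\n'
       '{"aircraft": "robinson r - 22", "description": "light utility helicopter"}\n'
       'Let me know if you need anything else.')

# Keep only the first '{' .. last '}' span and wrap it in a list,
# which is essentially what parse_json below does before json.loads.
start, end = raw.find("{"), raw.rfind("}")
records = json.loads("[" + raw[start:end + 1] + "]")
print(records)
# [{'aircraft': 'robinson r - 22', 'description': 'light utility helicopter'}]
```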
\n","metadata":{}},{"cell_type":"code","source":"import json\n\ndef parse_json(data_str):\n # Remove leading/trailing whitespace and newlines\n i = data_str.find('{')\n j = data_str.rfind('}')\n data_str = '['+data_str[i:j+1]+']'\n data_str = data_str.strip()\n\n # Check if the string is enclosed within triple backticks (\"```json\" and \"```\")\n if data_str.startswith(\"```json\"):\n # Remove the leading/trailing \"```json\" and \"```\"\n data_str = data_str[len(\"```json\"):]\n if data_str.endswith(\"```\"):\n data_str = data_str[:-len(\"```\")]\n \n try:\n # Parse JSON\n data = json.loads(data_str)\n return data\n except json.JSONDecodeError as e:\n print(\"JSON parsing error:\", e)\n return None","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:27:13.246599Z","iopub.execute_input":"2024-04-17T13:27:13.247416Z","iopub.status.idle":"2024-04-17T13:27:13.254997Z","shell.execute_reply.started":"2024-04-17T13:27:13.247380Z","shell.execute_reply":"2024-04-17T13:27:13.253820Z"},"trusted":true},"execution_count":14,"outputs":[]},{"cell_type":"code","source":"response_str = \"\"\"[{\"aircraft\": \"robinson r - 22\",\n \"description\": \"light utility helicopter\",\n \"max gross weight\": \"1370 lb (635 kg)\",\n \"total disk area\": \"497 ft square (46.2 m square)\",\n \"max disk loading\": \"2.6 lb / ft square (14 kg / m square)\"},\n{\"aircraft\": \"bell 206b3 jetranger\",\n \"description\": \"turboshaft utility helicopter\",\n \"max gross weight\": \"3200 lb (1451 kg)\",\n \"total disk area\": \"872 ft square (81.1 m square)\",\n \"max disk loading\": \"3.7 lb / ft square (18 kg / m square)\"},\n{\"aircraft\": \"ch - 47d chinook\",\n \"description\": \"tandem rotor helicopter\",\n \"max gross weight\": \"50000 lb (22680 kg)\",\n \"total disk area\": \"5655 ft square (526 m square)\",\n \"max disk loading\": \"8.8 lb / ft square (43 kg / m square)\"},\n{\"aircraft\": \"mil mi - 26\",\n \"description\": \"heavy - lift helicopter\",\n \"max gross weight\": \"123500 lb (56000 kg)\",\n \"total disk area\": \"8495 ft square (789 m square)\",\n \"max disk loading\": \"14.5 lb / ft square (71 kg / m square)\"},\n{\"aircraft\": \"ch - 53e super stallion\",\n \"description\": \"heavy - lift helicopter\",\n \"max gross weight\": \"73500 lb (33300 kg)\",\n \"total disk area\": \"4900 ft square (460 m square)\",\n \"max disk loading\": \"15 lb / ft square (72 kg / m square)\"}]\"\"\"\n\n# Convert the string representation to a list of dictionaries\nparse_json(response_str)","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:27:15.047758Z","iopub.execute_input":"2024-04-17T13:27:15.048585Z","iopub.status.idle":"2024-04-17T13:27:15.057956Z","shell.execute_reply.started":"2024-04-17T13:27:15.048551Z","shell.execute_reply":"2024-04-17T13:27:15.056852Z"},"trusted":true},"execution_count":15,"outputs":[{"execution_count":15,"output_type":"execute_result","data":{"text/plain":"[{'aircraft': 'robinson r - 22',\n 'description': 'light utility helicopter',\n 'max gross weight': '1370 lb (635 kg)',\n 'total disk area': '497 ft square (46.2 m square)',\n 'max disk loading': '2.6 lb / ft square (14 kg / m square)'},\n {'aircraft': 'bell 206b3 jetranger',\n 'description': 'turboshaft utility helicopter',\n 'max gross weight': '3200 lb (1451 kg)',\n 'total disk area': '872 ft square (81.1 m square)',\n 'max disk loading': '3.7 lb / ft square (18 kg / m square)'},\n {'aircraft': 'ch - 47d chinook',\n 'description': 'tandem rotor helicopter',\n 'max gross weight': '50000 lb (22680 kg)',\n 'total disk 
area': '5655 ft square (526 m square)',\n 'max disk loading': '8.8 lb / ft square (43 kg / m square)'},\n {'aircraft': 'mil mi - 26',\n 'description': 'heavy - lift helicopter',\n 'max gross weight': '123500 lb (56000 kg)',\n 'total disk area': '8495 ft square (789 m square)',\n 'max disk loading': '14.5 lb / ft square (71 kg / m square)'},\n {'aircraft': 'ch - 53e super stallion',\n 'description': 'heavy - lift helicopter',\n 'max gross weight': '73500 lb (33300 kg)',\n 'total disk area': '4900 ft square (460 m square)',\n 'max disk loading': '15 lb / ft square (72 kg / m square)'}]"},"metadata":{}}]},{"cell_type":"markdown","source":"\n#
IV | Data Preparation
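\nThe dataset's `answer` column stores each table in column-oriented form, i.e. `{"column": {"0": value, "1": value, ...}}`, while the metrics above expect a list of row records. The `transform_json_to_records` helper defined later in this section performs that pivot (and parses the JSON string first); here is a minimal sketch of the same idea on a made-up two-column answer, for illustration only:

```python
import json

# Made-up column-oriented answer, mimicking the dataset's format.
answer = ('{"aircraft": {"0": "robinson r - 22", "1": "bell 206b3 jetranger"}, '
          '"description": {"0": "light utility helicopter", '
          '"1": "turboshaft utility helicopter"}}')

columns = json.loads(answer)
row_ids = next(iter(columns.values())).keys()  # "0", "1", ...
records = [{col: vals[i] for col, vals in columns.items()} for i in row_ids]
print(records)
# [{'aircraft': 'robinson r - 22', 'description': 'light utility helicopter'},
#  {'aircraft': 'bell 206b3 jetranger', 'description': 'turboshaft utility helicopter'}]
```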
\n","metadata":{}},{"cell_type":"markdown","source":"I'll extract a sample of 100 records from the dataset excluding those with Arabic names, and then simplify the output to enhance performance.","metadata":{}},{"cell_type":"code","source":"df = pd.read_csv(\"/kaggle/input/table-extraction/table_extract.csv\")\ndf.head(5)","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:27:27.993072Z","iopub.execute_input":"2024-04-17T13:27:27.993995Z","iopub.status.idle":"2024-04-17T13:27:29.765433Z","shell.execute_reply.started":"2024-04-17T13:27:27.993958Z","shell.execute_reply":"2024-04-17T13:27:29.764362Z"},"trusted":true},"execution_count":16,"outputs":[{"execution_count":16,"output_type":"execute_result","data":{"text/plain":" context \\\n0 aircraft ... \n1 order year manufacturer mod... \n2 player no nationality ... \n3 player no nationali... \n4 player no nationality ... \n\n answer \n0 {\"aircraft\":{\"0\":\"robinson r - 22\",\"1\":\"bell 2... \n1 {\"order year\":{\"0\":\"1992 - 93\",\"1\":\"1996\",\"2\":... \n2 {\"player\":{\"0\":\"quincy acy\",\"1\":\"hassan adams\"... \n3 {\"player\":{\"0\":\"patrick o'bryant\",\"1\":\"jermain... \n4 {\"player\":{\"0\":\"mark baker\",\"1\":\"marcus banks\"... ","text/html":"
"},"metadata":{}}]},{"cell_type":"code","source":"def is_arabic_name(name):\n \"\"\"\n Checks if a name contains Arabic characters.\n\n Args:\n name: The name string to check.\n\n Returns:\n True if Arabic characters are found, False otherwise.\n \"\"\"\n # Regular expression to match Arabic characters\n arabic_pattern = re.compile(\"[\\u0600-\\u06FF]+\")\n\n # Search for Arabic characters in the name\n match = arabic_pattern.search(name)\n\n # Return True if a match is found, False otherwise\n return bool(match)","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:27:30.016345Z","iopub.execute_input":"2024-04-17T13:27:30.017012Z","iopub.status.idle":"2024-04-17T13:27:30.022491Z","shell.execute_reply.started":"2024-04-17T13:27:30.016979Z","shell.execute_reply":"2024-04-17T13:27:30.021413Z"},"trusted":true},"execution_count":17,"outputs":[]},{"cell_type":"code","source":"df = df[~df['context'].apply(lambda x: is_arabic_name(x))]","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:27:36.964897Z","iopub.execute_input":"2024-04-17T13:27:36.965801Z","iopub.status.idle":"2024-04-17T13:27:37.979661Z","shell.execute_reply.started":"2024-04-17T13:27:36.965766Z","shell.execute_reply":"2024-04-17T13:27:37.978773Z"},"trusted":true},"execution_count":18,"outputs":[]},{"cell_type":"code","source":"df_sample =df.loc[:50]","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:27:38.656780Z","iopub.execute_input":"2024-04-17T13:27:38.657148Z","iopub.status.idle":"2024-04-17T13:27:38.664302Z","shell.execute_reply.started":"2024-04-17T13:27:38.657120Z","shell.execute_reply":"2024-04-17T13:27:38.663227Z"},"trusted":true},"execution_count":19,"outputs":[]},{"cell_type":"code","source":"def transform_json_to_records(json_data):\n \"\"\"\n Transforms a structured JSON object into a list of records.\n\n The function assumes the structure of the JSON object is a dictionary of dictionaries,\n where each top-level key is a field name, and its value is a dictionary mapping indices\n to field values. 
All sub-dictionaries must have the same keys.\n\n Parameters:\n - json_data: A dictionary representing the structured JSON object to transform.\n\n Returns:\n - A list of dictionaries, where each dictionary represents a record with fields and values\n derived from the input JSON.\n \"\"\"\n json_data = json.loads(json_data)\n # Extract keys from the first dictionary item to use as indices\n indices = list(next(iter(json_data.values())).keys())\n # Initialize the list to store transformed records\n records = []\n\n # Loop over each index to create a record\n for index in indices:\n record = {field: values[index] for field, values in json_data.items()}\n records.append(record)\n\n return records","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:27:43.963685Z","iopub.execute_input":"2024-04-17T13:27:43.964431Z","iopub.status.idle":"2024-04-17T13:27:43.971661Z","shell.execute_reply.started":"2024-04-17T13:27:43.964395Z","shell.execute_reply":"2024-04-17T13:27:43.970462Z"},"trusted":true},"execution_count":20,"outputs":[]},{"cell_type":"code","source":"df_sample.loc[:, 'answer'] = df_sample['answer'].map(transform_json_to_records)","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:27:45.817229Z","iopub.execute_input":"2024-04-17T13:27:45.817713Z","iopub.status.idle":"2024-04-17T13:27:45.828486Z","shell.execute_reply.started":"2024-04-17T13:27:45.817673Z","shell.execute_reply":"2024-04-17T13:27:45.827241Z"},"trusted":true},"execution_count":21,"outputs":[]},{"cell_type":"code","source":"df_sample.head()","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:27:47.330348Z","iopub.execute_input":"2024-04-17T13:27:47.331245Z","iopub.status.idle":"2024-04-17T13:27:47.371504Z","shell.execute_reply.started":"2024-04-17T13:27:47.331187Z","shell.execute_reply":"2024-04-17T13:27:47.370353Z"},"trusted":true},"execution_count":22,"outputs":[{"execution_count":22,"output_type":"execute_result","data":{"text/plain":" context \\\n0 aircraft ... \n1 order year manufacturer mod... \n2 player no nationality ... \n3 player no nationali... \n4 player no nationality ... \n\n answer \n0 [{'aircraft': 'robinson r - 22', 'description'... \n1 [{'order year': '1992 - 93', 'manufacturer': '... \n2 [{'player': 'quincy acy', 'no': '4', 'national... \n3 [{'player': 'patrick o'bryant', 'no': 13, 'nat... \n4 [{'player': 'mark baker', 'no': '3', 'national... ","text/html":"
"},"metadata":{}}]},{"cell_type":"markdown","source":"\n#
V | Benchmark
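\nEach model is scored the same way: for every sampled row, the prompt below is combined with the row's `context`, the model generates a completion, the completion is cleaned with `parse_json`, and the parsed records are compared against the gold `answer` using the metrics from section II. The scoring loop itself is not reproduced in this extract, so the sketch below is only an assumption about how these pieces are wired together; the prompt formatting (no chat template) and the generation settings such as `max_new_tokens` are illustrative choices, not the benchmark's exact configuration.

```python
# Hedged sketch of the evaluation loop; assumes `model`, `tokenizer`, `prompt`,
# `df_sample`, `parse_json`, `average_percentage_key` and `average_percentage_value`
# are defined as in the cells above. Generation settings are illustrative only.
import torch

key_scores, value_scores = [], []

for _, row in df_sample.iterrows():
    full_prompt = prompt + "\n\nINPUT:\n" + row["context"] + "\n\nOUTPUT:\n"
    inputs = tokenizer(full_prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=1024, do_sample=False)
    # Decode only the newly generated tokens, not the prompt.
    completion = tokenizer.decode(output_ids[0][inputs["input_ids"].shape[1]:],
                                  skip_special_tokens=True)
    pred_records = parse_json(completion)
    if pred_records:  # skip rows whose output could not be parsed
        key_scores.append(average_percentage_key(row["answer"], pred_records))
        value_scores.append(average_percentage_value(row["answer"], pred_records))

print("mean key score  :", sum(key_scores) / len(key_scores))
print("mean value score:", sum(value_scores) / len(value_scores))
```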
\n","metadata":{}},{"cell_type":"markdown","source":"\n>## Prompt","metadata":{}},{"cell_type":"code","source":"prompt = \"\"\"Your task is to extract relevant information from the provided context and format it into a list of records, following the template below.\n A JSON object representing the extracted table structure. The list of records follows this format: \n [ { \"column_1\": \"val1\",\"column_2\": \"val1\",\"column_3\": \"val1\",...},\n { \"column_1\": \"val2\",\"column_2\": \"val2\",\"column_3\": \"val3\",...},\n ...\n ]\n Each key in the records represents a column header, and the corresponding value is another object containing key-value pairs for each row in that column.\n\nINPUT example:\n# do not use the data from the examples & template; they are just for reference only. The following data contains actual information. If a value is not found, leave it empty. \n\n aircraft description max gross weight total disk area max disk loading\n0 robinson r - 22 light utility helicopter 1370 lb (635 kg) 497 ft square (46.2 m square) 2.6 lb / ft square (14 kg / m square)\n1 bell 206b3 jetranger turboshaft utility helicopter 3200 lb (1451 kg) 872 ft square (81.1 m square) 3.7 lb / ft square (18 kg / m square)\n2 ch - 47d chinook tandem rotor helicopter 50000 lb (22680 kg) 5655 ft square (526 m square) 8.8 lb / ft square (43 kg / m square)\n3 mil mi - 26 heavy - lift helicopter 123500 lb (56000 kg) 8495 ft square (789 m square) 14.5 lb / ft square (71 kg / m square)\n4 ch - 53e super stallion heavy - lift helicopter 73500 lb (33300 kg) 4900 ft square (460 m square) 15 lb / ft square (72 kg / m square)\n\nOUTPUT example:\n# do not use the data from the examples & template; they are just for reference only. The following data contains actual information. If a value is not found, leave it empty. 
\n[{\"aircraft\": \"robinson r - 22\",\n \"description\": \"light utility helicopter\",\n \"max gross weight\": \"1370 lb (635 kg)\",\n \"total disk area\": \"497 ft square (46.2 m square)\",\n \"max disk loading\": \"2.6 lb / ft square (14 kg / m square)\"},\n{\"aircraft\": \"bell 206b3 jetranger\",\n \"description\": \"turboshaft utility helicopter\",\n \"max gross weight\": \"3200 lb (1451 kg)\",\n \"total disk area\": \"872 ft square (81.1 m square)\",\n \"max disk loading\": \"3.7 lb / ft square (18 kg / m square)\"},\n{\"aircraft\": \"ch - 47d chinook\",\n \"description\": \"tandem rotor helicopter\",\n \"max gross weight\": \"50000 lb (22680 kg)\",\n \"total disk area\": \"5655 ft square (526 m square)\",\n \"max disk loading\": \"8.8 lb / ft square (43 kg / m square)\"},\n{\"aircraft\": \"mil mi - 26\",\n \"description\": \"heavy - lift helicopter\",\n \"max gross weight\": \"123500 lb (56000 kg)\",\n \"total disk area\": \"8495 ft square (789 m square)\",\n \"max disk loading\": \"14.5 lb / ft square (71 kg / m square)\"},\n{\"aircraft\": \"ch - 53e super stallion\",\n \"description\": \"heavy - lift helicopter\",\n \"max gross weight\": \"73500 lb (33300 kg)\",\n \"total disk area\": \"4900 ft square (460 m square)\",\n \"max disk loading\": \"15 lb / ft square (72 kg / m square)\"}]\n\"\"\"","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:27:54.601480Z","iopub.execute_input":"2024-04-17T13:27:54.602278Z","iopub.status.idle":"2024-04-17T13:27:54.609988Z","shell.execute_reply.started":"2024-04-17T13:27:54.602243Z","shell.execute_reply":"2024-04-17T13:27:54.608951Z"},"trusted":true},"execution_count":23,"outputs":[]},{"cell_type":"markdown","source":"\n>## dolphin-2.2.1-mistral-7b","metadata":{}},{"cell_type":"code","source":"base_model_id = \"cognitivecomputations/dolphin-2.2.1-mistral-7b\"\nbnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16,\n #weights=\"int8\"\n)\n\nmodel = AutoModelForCausalLM.from_pretrained(base_model_id, quantization_config=bnb_config, device_map=\"auto\",trust_remote_code=True)\ntokenizer = AutoTokenizer.from_pretrained(base_model_id, use_fast=False,device_map=\"auto\")","metadata":{"execution":{"iopub.status.busy":"2024-04-17T09:11:30.976151Z","iopub.execute_input":"2024-04-17T09:11:30.976388Z","iopub.status.idle":"2024-04-17T09:14:10.696683Z","shell.execute_reply.started":"2024-04-17T09:11:30.976368Z","shell.execute_reply":"2024-04-17T09:14:10.695768Z"},"trusted":true},"execution_count":41,"outputs":[{"output_type":"display_data","data":{"text/plain":"config.json: 0%| | 0.00/618 [00:00\n>## starcoder2-7b","metadata":{}},{"cell_type":"code","source":"base_model_id = \"Vezora/Mistral-22B-v0.1\"\nbnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16,\n #weights=\"int8\"\n)\n\nmodel = AutoModelForCausalLM.from_pretrained(base_model_id, quantization_config=bnb_config, device_map=\"auto\",trust_remote_code=True)\ntokenizer = AutoTokenizer.from_pretrained(base_model_id, 
use_fast=False,device_map=\"auto\")","metadata":{"execution":{"iopub.status.busy":"2024-04-17T13:28:02.170950Z","iopub.execute_input":"2024-04-17T13:28:02.171380Z","iopub.status.idle":"2024-04-17T13:41:38.909545Z","shell.execute_reply.started":"2024-04-17T13:28:02.171347Z","shell.execute_reply":"2024-04-17T13:41:38.908583Z"},"trusted":true},"execution_count":24,"outputs":[{"output_type":"display_data","data":{"text/plain":"config.json: 0%| | 0.00/662 [00:00Here is an example of how you can extract the information and format it into a list of records using Python:\\n```python\\nimport json\\n\\n# Define the table structure\\ntable_structure = {\\n \"column_1\": \"aircraft\",\\n \"column_2\": \"description\",\\n \"column_3\": \"max gross weight\",\\n \"column_4\": \"total disk area\",\\n \"column_5\": \"max disk loading\",\\n}\\n\\n# Define the list of records\\nlist_of_records = []\\n\\n# Extract the information from the table\\ntable_information = \"0 robotin r - 22 light utility helicopter 1370 lb (635 kg) 497 ft square (46.2 m square) 2.6 lb / ft square (14.3 kg / m square)\\n\\n# Split the table into rows\\ntable_rows = table_information.split(\"\\\\n\")\\n\\n# Iterate over each row\\nfor row in table_rows:\\n # Split the row into columns\\n row_parts = row.split()\\n\\n # Create a dictionary for each column\\n column_dict = {}\\n\\n # Iterate over each column\\n for column in column_dict.keys(), enumerate(row_parts):\\n # Add the value to the dictionary\\n column_dict[column] = column_value\\n\\n # Add the row dictionary to the list of records\\n list_of_records.append(column_dict)\\n\\n# Convert the list of records into a JSON object\\njson_table = json.dumps(list_of_records)\\n\\n# Print the JSON table\\nprint(json_table)\\n```\\nOutput:\\n```\\n[{\"column_1\": \"robin\", \"column_2\": \"r-22\", \"column_3\": \"light\", \"column_4\": \"utility\", \"column_5\": \"helicopter\", \"column_6\": \"1370\", \"column_7\": \"lb\", \"column_8\": \"635\", \"column_9\": \"kg\", \"column_10\": \"497\", \"column_11\": \"ft\", \"column_12\": \"square\", \"column_13\": \"46.2\", \"column_14\": \"m\", \"column_15\": \"2.6\", \"column_16\": \"lb\", \"column_17\": \"ft\", \"column_18\": \"14.3\", \"column_19\": \"kg\", \"column_20\": \"m\", \"column_21\": \"square\", \"column_22\": \"2.6\", \"column_23\": \"lb\", \"column_24\": \"ft\", \"column_25\": \"14.3\", \"column_26\": \"kg\", \"column_27\": \"m\", \"column_28\": \"square\", \"column_29\": \"2.6\", \"column_30\": \"lb\", \"column_31\": \"ft\", \"column_32\": \"14.3\", \"column_33\": \"kg\", \"column_34\": \"m\", \"column_35\": \"square\", \"column_36\": \"2.6\", \"column_37\": \"lb\", \"column_38\": \"ft\", \"column_39\": \"14.3\", \"column_40\": \"kg\", \"column_41\": \"m\", \"column_42\": \"square\", \"column_43\": \"2.6\", \"column_44\": \"lb\", \"column_45\": \"ft\", \"column_46\": \"14.3\", \"column_47\": \"kg\", \"column_48\": \"m\", \"column_49\": \"square\", \"column_50\": \"2.6\", \"column_51\": \"lb\", \"column_52\": \"ft\", \"column_53\": \"14.3\", \"column_54\": \"kg\", \"column_55\": \"m\", \"column_56\": \"square\", \"column_57\": \"2.6\", \"column_58\": \"lb\", \"column_59\": \"ft\", \"column_60\": \"14.3\", \"column_61\": \"kg\", \"column_62\": \"m\", \"column_63\": \"square\", \"column_64\": \"2.6\", \"column_65\": \"lb\", \"column_66\": \"ft\", \"column_67\": \"14.3\", \"column_68\": \"kg\", \"column_69\": \"m\", \"column_70\": \"square\", \"column_71\": \"2.6\", \"column_72\": \"lb\", \"column_73\": \"ft\", \"column_74\": 
\"14.3\", \"column_75\": \"kg\", \"column_76\": \"m\", \"column_77\": \"square\", \"column_78\": \"2.6\", \"column_79\": \"lb\", \"column_80\": \"ft\", \"column_81\": \"14.3\", \"column_82\": \"kg\", \"column_83\": \"m\", \"column_84\": \"square\", \"column_85\": \"2.6\", \"column_86\": \"lb\", \"column_87\": \"ft\", \"column_88\": \"14.3\", \"column_89\": \"kg\", \"column_10\": \"m\", \"column_11\": \"2.6\", \"column_12\": \"lb\", \"column_13\": \"ft\", \"column_14\": \"14.3\", \"column_15\": \"kg\", \"column_16\": \"m\", \"column_17\": \"ft\", \"column_18\": \"2.6\", \"column_19\": \"lb\", \"column_20\": \"ft\", \"column_21\": \"14.3\", \"column_22\": \"kg\", \"column_23\": \"m\", \"column_24\": \"ft\", \"column_25\": \"2.6\", \"column_26\": \"lb\", \"column_27\": \"ft\", \"column_28\": \"14.3\", \"column_29\": \"kg\", \"column_30\": \"m\", \"column_31\": \"ft\", \"column_32\": \"2.6\", \"column_33\": \"lb\", \"column_34\": \"ft\", \"column_35\": \"14.3\", \"column_36\": \"kg\", \"column_37\": \"m\", \"column_38\": \"ft\", \"column_39\": \"2.6\", \"column_40\": \"lb\", \"column_41\": \"ft\", \"column_42\": \"14.3\", \"column'"},"metadata":{}}]}]}