{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.6.10"
    },
    "colab": {
      "name": "augment_wikitable.ipynb",
      "provenance": []
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "id": "HDQTQgRzNjZg",
        "outputId": "c1035e86-fd20-430b-90f9-2d73678563b0"
      },
      "source": [
        "import os\n",
        "import re\n",
        "import json\n",
        "import random\n",
        "import codecs\n",
        "from template_config import *\n",
        "from nltk import word_tokenize\n",
        "from collections import defaultdict\n",
        "# Use the stable top-level export; the transformers.tokenization_roberta\n",
        "# module path is deprecated and was removed in transformers v4.\n",
        "from transformers import RobertaTokenizer\n",
        "\n",
        "SEP_TOKEN = \"</s>\"  # RoBERTa separator token used when serializing tables\n",
        "tokenizer = RobertaTokenizer.from_pretrained(\"roberta-base\")\n",
        "MAX_TOKEN_LEN = 189  # max subword length budget for a serialized example"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "To use data.metrics please install scikit-learn. See https://scikit-learn.org/stable/index.html\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "EXbCmV4BNjZj"
      },
      "source": [
        "### Read tables from text files"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "aHPv7VhlNjZj"
      },
      "source": [
        "# Input corpus: one serialized wikitable per line (parsed by main_process below)\n",
        "train_corpus = \"data/wikitable_dup1_row1.txt\"\n",
        "# output_file = \"data/data_comb_tables.txt\""
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xJzhjeT3NjZk"
      },
      "source": [
        "def hasNumbers(inputString):\n",
        "    \"\"\"Return True if inputString contains at least one digit character.\"\"\"\n",
        "    for char in inputString:\n",
        "        if char.isdigit():\n",
        "            return True\n",
        "    return False\n",
        "\n",
        "def check_name(inpStr):\n",
        "    \"\"\"A token qualifies for a name: longer than 1 char, no '-', no digits.\"\"\"\n",
        "    if len(inpStr) <= 1:\n",
        "        return False\n",
        "    if \"-\" in inpStr:\n",
        "        return False\n",
        "    return not hasNumbers(inpStr)\n",
        "\n",
        "def gen_name(title, must_have=False):\n",
        "    \"\"\"Derive a short table name from the trailing qualifying words of title.\n",
        "\n",
        "    With probability 0.4 the last two qualifying words are joined, otherwise\n",
        "    only the last one. If no word qualifies, return the first raw token when\n",
        "    must_have is set, else the empty string.\n",
        "    \"\"\"\n",
        "    title_tokens = title.split(\" \")\n",
        "    qualify_words = [w for w in title_tokens if check_name(w)]\n",
        "    tail = 2 if random.random() < 0.4 else 1\n",
        "    name = \" \".join(qualify_words[-tail:])\n",
        "    if name != \"\":\n",
        "        return name\n",
        "    return title_tokens[0] if must_have else name"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "KgHmOyJ1NjZk"
      },
      "source": [
        "def main_process(train_corpus):\n",
        "    \"\"\"Parse the serialized web-table corpus into a list of table dicts.\n",
        "\n",
        "    Each line of train_corpus holds one table; <special7/8/9> markers\n",
        "    delimit the page title, column headers, and first-row entries.\n",
        "    Returns dicts with keys: name, columns, columns_original,\n",
        "    column_types, values. Tables with fewer than 3 usable columns, or\n",
        "    without a usable derived name, are skipped.\n",
        "    \"\"\"\n",
        "    total_count = 0\n",
        "    webtables = []\n",
        "    with open(train_corpus, \"r\", encoding=\"utf-8\") as f:\n",
        "        for line in f:\n",
        "            skip = False\n",
        "            if total_count % 100000 == 0:\n",
        "                print(\"processed: \", total_count)\n",
        "            # Normalize markers to <tabn>/<coln>/<entry> and strip noise chars\n",
        "            tokens = line.lower().replace(\"<special7>\", \"<tabn>\").replace(\"<special8>\", \"<coln>\").replace(\"<special9>\", \"<entry>\").replace(\"*\", \"\").replace(\"|||\", \"\")\n",
        "            table = {\"columns\": [], \"values\": [], \"columns_original\": [], \"column_types\": []}\n",
        "            chunks = tokens.split(\" <coln> \")\n",
        "            for chunk in chunks:\n",
        "                if \"<tabn>\" in chunk:\n",
        "                    # Title chunk: derive a short table name from the page title\n",
        "                    page_title = chunk.replace(\"<tabn>\", \"\").strip()\n",
        "                    table_name = gen_name(page_title)\n",
        "                    table[\"name\"] = table_name\n",
        "                    if table_name == \"\" or len(table_name) < 2:\n",
        "                        skip = True\n",
        "                else:\n",
        "                    # Column chunk: \"<header> <entry> <first-row value>\"\n",
        "                    assert \"<entry>\" in chunk\n",
        "                    chunk_toks = chunk.split(\" <entry> \")\n",
        "                    if len(chunk_toks) == 2:\n",
        "                        col_name, entry = chunk_toks[0].strip(), chunk_toks[1].strip()\n",
        "                        if len(col_name) > 1:\n",
        "                            # Keep at most the first 5 tokens of header and value\n",
        "                            table[\"columns\"].append(\" \".join(col_name.split(\" \")[:5]))\n",
        "                            table[\"columns_original\"].append(col_name)\n",
        "                            ctype = \"text\"\n",
        "                            if entry.isdigit():\n",
        "                                ctype = \"real\"\n",
        "                            table[\"column_types\"].append(ctype)\n",
        "                            table[\"values\"].append(\" \".join(entry.split(\" \")[:5]))\n",
        "                            \n",
        "            \n",
        "            if len(table[\"columns\"]) < 3:\n",
        "                skip = True\n",
        "            if not skip:\n",
        "                # Prepend the synthetic \"*\" (all-columns) entry, then prefix\n",
        "                # every header with the underscored table name\n",
        "                table_name = table[\"name\"]\n",
        "                table[\"columns\"] = [table_name + \" *\"] + table[\"columns\"]\n",
        "                table[\"columns_original\"] = [\"*\"] + table[\"columns_original\"]\n",
        "                table[\"column_types\"] = [\"text\"] + table[\"column_types\"]\n",
        "                table[\"values\"] = [\"all\"] + table[\"values\"]\n",
        "                tabn_str = \"_\".join(table_name.split(\" \"))\n",
        "                table[\"columns\"] = [tabn_str +\" \"+ hd for hd in table[\"columns\"]]\n",
        "                if \"*\" not in table['columns'][0]:\n",
        "                    print(table['columns'])\n",
        "                webtables.append(table)\n",
        "                \n",
        "            total_count += 1\n",
        "            \n",
        "    return webtables"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "EXD_aA5YNjZl"
      },
      "source": [
        "# Parse every corpus line into a table dict (slow on large files)\n",
        "web_tables = main_process(train_corpus)"
      ],
      "execution_count": 1,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "v9gFUXHmNjZm"
      },
      "source": [
        "### Read NL-SQL templates and sql component mapping file"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "MQ3AOT5bNjZm"
      },
      "source": [
        "MAX_COL_NUM = 25  # max total columns allowed when combining tables into a DB\n",
        "OPS = [\"=\", \">\", \"<\", \">=\", \"<=\", \"!=\", \"LIKE\"]\n",
        "nlsql_templates_file = \"data/nlsql_templates.txt\"\n",
        "# NOTE(review): absolute local path -- presumably only valid on the original machine\n",
        "spider_data_file = '/data/projects/nl2sql/datasets_final/data/'\n",
        "sql_components_file = \"data/sql_components.json\""
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "B1ctJI13NjZn"
      },
      "source": [
        "# read NL-SQL templates\n",
        "# File layout: blocks separated by blank lines; each block has a\n",
        "# \"SQL Pattern:\" line (pattern ||| constraints), a \"count:\" line, and\n",
        "# one or more \"question:\" lines (question ||| constraints).\n",
        "templates = []\n",
        "with open(nlsql_templates_file) as fp:\n",
        "    lines = fp.readlines()\n",
        "    template_one = {}\n",
        "    for line in lines:\n",
        "        if \"\\n\" == line:\n",
        "            # A blank line terminates the current template block\n",
        "            templates.append(template_one) \n",
        "        elif \"SQL Pattern:\" in line:\n",
        "            template_one = {}\n",
        "            sps = line.strip().replace(\"SQL Pattern: \", \"\").split(\"|||\")\n",
        "            template_one[\"questions\"] = []\n",
        "            if len(sps) == 1:\n",
        "                # No \"|||\" part: pattern has no constraints\n",
        "                template_one[\"SQL pattern\"] = sps[0]\n",
        "                template_one[\"SQL constraints\"] = []\n",
        "            elif len(sps) == 2:\n",
        "                template_one[\"SQL pattern\"] = sps[0]\n",
        "                template_one[\"SQL constraints\"] = [x.strip() for x in sps[1].split(\"|\") if x != \" \"]\n",
        "            else:\n",
        "                print(\"\\n======Error warning!!!!\")\n",
        "        elif \"count: \" in line:\n",
        "            # Frequency of this SQL pattern in the source data\n",
        "            sql_count = int(line.strip().replace(\"count: \", \"\"))\n",
        "            template_one[\"count\"] = sql_count\n",
        "        elif \"question:  \" in line:\n",
        "            sps = line.strip().replace(\"question:  \", \"\").split(\"|||\")\n",
        "            question = sps[0]\n",
        "            if len(sps) == 2:\n",
        "                q_constraints = [x.strip() for x in sps[1].split(\"|\") if x != \" \"]\n",
        "            else:\n",
        "                q_constraints = []\n",
        "            template_one[\"questions\"].append((question, q_constraints))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Qvu2J4CiNjZo"
      },
      "source": [
        "# Keep only templates usable with a single table: drop templates whose\n",
        "# SQL constraints mention \"id\" or the second-table alias \"T1\", and drop\n",
        "# questions that reference TABLE1.\n",
        "templates_one_table = []\n",
        "for template in templates:\n",
        "    sql_constraints = template['SQL constraints']\n",
        "    sql_pattern = template[\"SQL pattern\"]\n",
        "    if any(\"id\" in c or \"T1\" in c for c in sql_constraints):\n",
        "        continue\n",
        "    questions_after = [(q, qc) for q, qc in template[\"questions\"] if \"TABLE1\" not in q]\n",
        "    if len(questions_after) > 0:\n",
        "        templates_one_table.append({\n",
        "            'SQL constraints': sql_constraints,\n",
        "            'SQL pattern': sql_pattern,\n",
        "            \"questions\": questions_after,\n",
        "        })"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "NcS70qEENjZo",
        "outputId": "c5741228-6212-4da5-db35-b1e27e6a737d"
      },
      "source": [
        "# Collect every constraint used by the one-table templates (both the\n",
        "# SQL-level and the per-question ones) to see what must be handled below\n",
        "all_constraints = []\n",
        "for tmp in templates_one_table:\n",
        "    all_constraints.extend(tmp['SQL constraints'])\n",
        "    for q in tmp['questions']:\n",
        "        all_constraints.extend(q[1])\n",
        "\n",
        "print(list(set(all_constraints)))"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "['{AGG0}=MIN', 'P0=<', 'P1==', 'P0=>', 'P0=P1==', 'P0==', '{AGG0}=MAX', 'P0=P1=P2==']\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "9x5djtunNjZp"
      },
      "source": [
        "# read SQL component file\n",
        "# NOTE(review): file contents not shown here; parsed as-is with json.load\n",
        "with open(sql_components_file) as json_file:\n",
        "    sql_components = json.load(json_file)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "BBHvVkU4NjZp"
      },
      "source": [
        "### Unify and combine tables as databases"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0iMbdZv3NjZp"
      },
      "source": [
        "def create_dbs(tables):\n",
        "    \"\"\"Group consecutive tables into small multi-table databases.\n",
        "\n",
        "    A database collects one or two consecutive tables (random), is kept\n",
        "    only if all its qualified \"column.table\" names are unique, and each\n",
        "    table of a multi-table db may randomly receive a synthetic 'id' or\n",
        "    'name' index column. Returns a list of databases (lists of tables).\n",
        "    \"\"\"\n",
        "#     random.shuffle(tables)  \n",
        "    dbs = []\n",
        "    cur_cols = []   # qualified \"column.table\" names gathered for the current db\n",
        "    db_one = []     # tables collected for the database being built\n",
        "    ahd_cols = []   # look-ahead: cur_cols plus the next table's columns\n",
        "    for i, tab in enumerate(tables):\n",
        "        if i % 100000 == 0:\n",
        "            print(\"processed: \", i)\n",
        "        # random.choice([0, 1]) makes the db hold one or two tables\n",
        "        if len(db_one) <= random.choice([0, 1]) and len(ahd_cols) < MAX_COL_NUM:\n",
        "            db_one.append(tab)\n",
        "            cur_cols.extend([col+\".\"+tab[\"name\"] for col in tab[\"columns\"]])\n",
        "            if i+1 < len(tables):\n",
        "                ahd_cols = cur_cols + [col+\".\"+tables[i+1][\"name\"] for col in tables[i+1][\"columns\"]]\n",
        "            else:\n",
        "                 break\n",
        "        else:\n",
        "            # Keep the db only if no qualified column name repeats\n",
        "            if len(cur_cols) == len(list(set(cur_cols))):\n",
        "                if len(db_one) > 1:\n",
        "                    db_one_new = []\n",
        "                    # NOTE(review): this loop variable shadows the outer `tab`;\n",
        "                    # harmless today since `tab` is reassigned next iteration\n",
        "                    for tab in db_one:\n",
        "                        if tab[\"columns\"][0] == \"id\":\n",
        "                            tab[\"columns\"] = tab[\"columns\"][1:]\n",
        "                            tab[\"column_types\"] = tab[\"column_types\"][1:]\n",
        "                            tab[\"columns_original\"] = tab[\"columns_original\"][1:]\n",
        "                            tab[\"values\"] = tab[\"values\"][1:]\n",
        "                            \n",
        "                        # With prob 0.7 insert an index column after the \"*\"\n",
        "                        # column (30% of those use \"name\" instead of \"id\")\n",
        "                        if random.random() < 0.7:\n",
        "                            index_col = \"id\"\n",
        "                            if random.random() < 0.3:\n",
        "                                index_col = \"name\"\n",
        "\n",
        "                            if index_col not in tab[\"columns\"]:\n",
        "                                tabn_str = \"_\".join(tab[\"name\"].split(\" \"))\n",
        "                                tab[\"columns\"] = [tab[\"columns\"][0]] + [tabn_str +\" \"+ index_col] + tab[\"columns\"][1:]\n",
        "                                val_add = 1\n",
        "                                if index_col == \"name\":\n",
        "                                    val_add = \"value\"\n",
        "                                tab[\"values\"] = [tab[\"values\"][0]] + [val_add] + tab[\"values\"][1:]\n",
        "                                tab[\"column_types\"] = [tab[\"column_types\"][0]] + [\"text\"] + tab[\"column_types\"][1:]\n",
        "                                tab[\"columns_original\"] = [tab[\"columns_original\"][0]] + [index_col] + tab[\"columns_original\"][1:]\n",
        "                        db_one_new.append(tab)\n",
        "                    dbs.append(db_one_new)\n",
        "                else:\n",
        "                    dbs.append(db_one)\n",
        "            # Start a fresh database; note the current `tab` is NOT added to it\n",
        "            db_one = []\n",
        "            cur_cols = []\n",
        "            ahd_cols = []\n",
        "            \n",
        "    return dbs"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "A1UNmE90NjZq",
        "outputId": "72137197-5a6a-4454-d49e-f0f0c526db2d"
      },
      "source": [
        "# Inspect one parsed table as a sanity check\n",
        "web_tables[5]"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "{'columns': ['cup_team cup team *',\n",
              "  'cup_team player',\n",
              "  'cup_team years played',\n",
              "  'cup_team total w-l',\n",
              "  'cup_team singles w-l',\n",
              "  'cup_team doubles w-l'],\n",
              " 'values': ['all', 'cho soong-jae ( 110 )', '1 ( 1011 )', '1–0', '1–0', '1–0'],\n",
              " 'columns_original': ['*',\n",
              "  'player',\n",
              "  'years played',\n",
              "  'total w-l',\n",
              "  'singles w-l',\n",
              "  'doubles w-l'],\n",
              " 'column_types': ['text', 'text', 'text', 'text', 'text', 'text'],\n",
              " 'name': 'cup team'}"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 13
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0VrNo1ESNjZq",
        "outputId": "853afb64-2dac-4a03-e36d-a42e6fbde980"
      },
      "source": [
        "# Combine consecutive web tables into small synthetic databases\n",
        "webtable_dbs = create_dbs(web_tables)"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "processed:  0\n",
            "processed:  100000\n",
            "processed:  200000\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "8ZIDNCljNjZq",
        "outputId": "09123345-b6d2-417c-b16f-c330cf1f7b54"
      },
      "source": [
        "# Number of databases produced\n",
        "len(webtable_dbs)"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "83701"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 15
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "nFaBt7ckNjZr"
      },
      "source": [
        "# Spot-check the first 1000 databases: table names and column counts.\n",
        "# NOTE(review): prints ~4000 lines of output -- consider sampling fewer\n",
        "for db in webtable_dbs[:1000]:\n",
        "    tab_names = []\n",
        "    col_count = 0\n",
        "    for tab in db:\n",
        "        tab_names.append(tab[\"name\"])\n",
        "        col_count += len(tab[\"columns\"])\n",
        "    print(\"----------\")\n",
        "    print(\"table names: \", tab_names)\n",
        "    print(\"column num: \", col_count)\n",
        "    print(\"table num: \", len(tab_names))"
      ],
      "execution_count": 2,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "eFoFUzCRNjZr"
      },
      "source": [
        "### Start generating NL-SQL examples based on new databases and CFG grammars"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "6dSg-xszNjZr"
      },
      "source": [
        "##### detect question and SQL slots and process constraints"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "QzFCXkRDNjZr"
      },
      "source": [
        "def get_sql_slots(sql_pattern):\n",
        "    \"\"\"Scan a SQL template and classify its {SLOT} placeholders.\n",
        "\n",
        "    Returns (slots, columns, ops, values, aggs, dasc):\n",
        "      slots   -- every distinct non-FROM {...} token\n",
        "      columns -- {COLUMNi} -> list of conditions (e.g. \"number\", \"{VALUEj}\")\n",
        "      ops     -- {OPi} -> [column it compares, optional trailing {VALUEj}]\n",
        "      values  -- standalone {VALUEi} -> \"number\" or \"integer\"\n",
        "      aggs    -- {AGGi} -> the {COLUMNj} it aggregates\n",
        "      dasc    -- True if the pattern contains a {DASC} slot\n",
        "    \"\"\"\n",
        "    sql_tokens = sql_pattern.split(\" \")\n",
        "    columns = {}\n",
        "    ops = {}\n",
        "    values = {}\n",
        "    aggs = {}\n",
        "    dasc = False\n",
        "    slots = []\n",
        "    val_pros = []  # {VALUE} tokens already claimed by an {OP}\n",
        "    for i, tok in enumerate(sql_tokens):\n",
        "        # Collect every placeholder except FROM-related ones\n",
        "        if \"{\" in tok and \"}\" in tok and \"FROM\" not in tok:\n",
        "            if tok not in slots:\n",
        "                slots.append(tok)\n",
        "                \n",
        "        # {AGGi} ( {COLUMNj} ) -- the aggregated column must be numeric\n",
        "        if \"AGG\" in tok:\n",
        "            if i + 2 < len(sql_tokens) and \"(\" == sql_tokens[i+1]:\n",
        "                if \"COLUMN\" in sql_tokens[i+2]:\n",
        "                    if sql_tokens[i+2] not in columns.keys():\n",
        "                        columns[sql_tokens[i+2]] = [\"number\"]\n",
        "                    else:\n",
        "                        columns[sql_tokens[i+2]].append(\"number\")\n",
        "                    aggs[tok] = sql_tokens[i+2]\n",
        "                else:\n",
        "                    print(\"\\nTemplate Error: AGG format is wrong!!!\")\n",
        "                    print(sql_pattern)\n",
        "        elif \"COLUMN\" in tok:\n",
        "            if tok not in columns.keys():\n",
        "                columns[tok] = []\n",
        "        # {OPi}: bind to the column on its left and a {VALUE} on its right\n",
        "        elif \"OP\" in tok:\n",
        "            if i - 1 >= 0 and \"COLUMN\" in sql_tokens[i-1]:\n",
        "                ops[tok] = [sql_tokens[i-1]]\n",
        "                if i + 1 < len(sql_tokens) and \"VALUE\" in sql_tokens[i+1]:\n",
        "                    ops[tok].append(sql_tokens[i+1])\n",
        "                    val_pros.append(sql_tokens[i+1])\n",
        "            # e.g. \"COUNT ( * ) {OPi}\" or \"{AGG} ( {COLUMN} ) {OPi}\"\n",
        "            elif i - 2 >= 0 and \")\" == sql_tokens[i-1] and (\"COLUMN\" in sql_tokens[i-2] or \"*\" == sql_tokens[i-2]):\n",
        "                ops[tok] = [sql_tokens[i-2]]\n",
        "                if i + 1 < len(sql_tokens) and \"VALUE\" in sql_tokens[i+1]:\n",
        "                    ops[tok].append(sql_tokens[i+1])\n",
        "                    val_pros.append(sql_tokens[i+1])\n",
        "            else:\n",
        "                print(\"\\nTemplate Error: OP format is wrong!!!\")\n",
        "                print(sql_pattern)\n",
        "        elif \"VALUE\" in tok and tok not in val_pros:\n",
        "            \"\"\"\n",
        "            OP} {VALUE0}\n",
        "            LIMIT {VALUE0}\n",
        "            {COLUMN1} BETWEEN {VALUE0} AND {VALUE1}\n",
        "            HAVING COUNT ( * ) {OP1} {VALUE1}\n",
        "            = {VALUE1}\n",
        "            \"\"\"\n",
        "            if i - 2 >= 0 and (\"BETWEEN\" == sql_tokens[i-1] or \"AND\" == sql_tokens[i-1]):\n",
        "                values[tok] = \"number\"\n",
        "                if \"BETWEEN\" == sql_tokens[i-1]:\n",
        "                    columns[sql_tokens[i-2]].append(\"number\")\n",
        "            elif i - 1 >= 0 and \"LIMIT\" == sql_tokens[i-1]:\n",
        "                values[tok] = \"integer\"\n",
        "            elif i - 1 >= 0 and \"=\" == sql_tokens[i-1]:\n",
        "                assert \"COLUMN\" in sql_tokens[i-2]\n",
        "                columns[sql_tokens[i-2]].append(tok)\n",
        "            else:\n",
        "                print(\"\\nTemplate Error: VALUE format is wrong!!!\")\n",
        "                print(sql_pattern)\n",
        "        elif \"DASC\" in tok:\n",
        "            dasc = True\n",
        "    \n",
        "    return (list(set(slots)), columns, ops, values, aggs, dasc)\n",
        "\n",
        "\n",
        "def get_q_slots(question):\n",
        "    \"\"\"Return the distinct slot tokens (TABLE*/SC*/{...}) in a question.\"\"\"\n",
        "    q_toks = question.strip().split(\" \")\n",
        "    q_slots = list(set([tok for tok in q_toks if \"TABLE\" in tok or \"SC\" in tok or (\"{\" in tok and \"}\" in tok)]))\n",
        "    \n",
        "    return q_slots\n",
        "    \n",
        "\n",
        "def process_constraints(constraints, columns, slots):\n",
        "    \"\"\"Resolve template constraints into fixed slot values / column conditions.\n",
        "\n",
        "    Returns (slot_values, columns, skip_db_with_one_table): slot_values pins\n",
        "    {OPi}/{AGGi} slots to concrete operators; columns gains \"id\"/\"T0\"/\"T1\"\n",
        "    conditions; the flag marks templates needing a multi-table database.\n",
        "    \"\"\"\n",
        "    slot_values = {}\n",
        "    skip_db_with_one_table = False\n",
        "    for constraint in constraints:\n",
        "        if \"P0==\" == constraint:\n",
        "            assert \"{OP0}\" in slots\n",
        "            slot_values[\"{OP0}\"] = \"=\"\n",
        "        elif \"P1==\" == constraint:\n",
        "            assert \"{OP1}\" in slots\n",
        "            slot_values[\"{OP1}\"] = \"=\"\n",
        "        elif \"P0=P1==\" == constraint:\n",
        "            assert \"{OP0}\" in slots and \"{OP1}\" in slots\n",
        "            slot_values[\"{OP0}\"] = \"=\"\n",
        "            slot_values[\"{OP1}\"] = \"=\"\n",
        "        elif \"P0=P1=P2==\" == constraint:\n",
        "            assert \"{OP0}\" in slots and \"{OP1}\" in slots and \"{OP2}\" in slots\n",
        "            slot_values[\"{OP0}\"] = \"=\"\n",
        "            slot_values[\"{OP1}\"] = \"=\"\n",
        "            slot_values[\"{OP2}\"] = \"=\"\n",
        "        elif \"P0=>\" == constraint:\n",
        "            assert \"{OP0}\" in slots\n",
        "            slot_values[\"{OP0}\"] = \">\"\n",
        "        elif \"P0=<\" == constraint:\n",
        "            assert \"{OP0}\" in slots\n",
        "            slot_values[\"{OP0}\"] = \"<\"\n",
        "        elif \"{AGG0}=MIN\" == constraint:\n",
        "            assert \"{AGG0}\" in slots\n",
        "            slot_values[\"{AGG0}\"] = \"MIN\"\n",
        "        elif \"{AGG0}=MAX\" == constraint:\n",
        "            assert \"{AGG0}\" in slots\n",
        "            slot_values[\"{AGG0}\"] = \"MAX\"\n",
        "        # C*-id / C*-T1 / T0-T1-* constraints all require >= 2 tables\n",
        "        elif \"C0-id\" == constraint:\n",
        "            skip_db_with_one_table = True\n",
        "            assert \"{COLUMN0}\" in slots and \"{COLUMN0}\" in columns.keys()\n",
        "            columns[\"{COLUMN0}\"].append(\"id\")\n",
        "        elif \"C1-id\" == constraint:\n",
        "            skip_db_with_one_table = True\n",
        "            assert \"{COLUMN1}\" in slots and \"{COLUMN1}\" in columns.keys()\n",
        "            columns[\"{COLUMN1}\"].append(\"id\")\n",
        "        elif \"C2-id\" == constraint:\n",
        "            skip_db_with_one_table = True\n",
        "            assert \"{COLUMN2}\" in slots and \"{COLUMN2}\" in columns.keys()\n",
        "            columns[\"{COLUMN2}\"].append(\"id\")\n",
        "        elif \"C3-T1\" == constraint:\n",
        "            skip_db_with_one_table = True\n",
        "            assert \"{COLUMN3}\" in slots and \"{COLUMN3}\" in columns.keys()\n",
        "            columns[\"{COLUMN3}\"].append(\"T1\")\n",
        "        elif \"T0-T1-JOIN\" == constraint or 'T0-T1-NO-JOIN' == constraint:\n",
        "            skip_db_with_one_table = True\n",
        "            columns[\"{COLUMN0}\"].append(\"T0\")\n",
        "            if \"{COLUMN1}\" in columns.keys():\n",
        "                columns[\"{COLUMN1}\"].append(\"T1\")\n",
        "    \n",
        "    return (slot_values, columns, skip_db_with_one_table)\n",
        "\n",
        "\n",
        "# helper function\n",
        "def gen_col_info(col_str, columns, columns_inf):\n",
        "    \"\"\"Pick a concrete column for the slot col_str and bind its VALUE slots.\n",
        "\n",
        "    columns maps slot name -> list of conditions (e.g. \"id\", \"number\",\n",
        "    \"{VALUE0}\"); columns_inf is a list of (column, type, value) triples\n",
        "    whose first entry is the \"*\" column. Returns (column, value_bindings),\n",
        "    where value_bindings is None or a list of ({VALUEi}, value) pairs.\n",
        "    \"\"\"\n",
        "    col_conds = columns[col_str]\n",
        "    value_slot = [cc for cc in col_conds if \"VALUE\" in cc]\n",
        "    col = \"\"\n",
        "    value = None\n",
        "    value_val = None\n",
        "    if \"id\" in col_conds:\n",
        "        # Prefer an id/name-like column; fall back to the first column\n",
        "        has_id = False\n",
        "        for c, t, v in columns_inf:\n",
        "            # bug fix: test the candidate name c (was `col`, always \"\" here,\n",
        "            # so the preference never fired)\n",
        "            if \"id\" in c or \"name\" in c:\n",
        "                has_id = True\n",
        "                # bug fix: bind `value` (was `values`), read below for VALUE slots\n",
        "                col, ctype, value = c, t, v\n",
        "                break\n",
        "        if not has_id:\n",
        "            col, ctype, value = columns_inf[0]\n",
        "    elif \"number\" in col_conds:\n",
        "        # Numeric condition: take the last real-typed column (skipping \"*\")\n",
        "        for colinfo in columns_inf[1:]:\n",
        "            if colinfo[1] == \"real\":\n",
        "                col, ctype, value = colinfo\n",
        "    if col == \"\":\n",
        "        col, ctype, value = random.choice(columns_inf[1:])\n",
        "\n",
        "    if len(value_slot) > 0:\n",
        "        assert len(value_slot) < 3\n",
        "        if len(value_slot) == 1:\n",
        "            value_val = [(value_slot[0], value)]\n",
        "        else:\n",
        "            value_val = [(value_slot[0], value), (value_slot[1], value)]\n",
        "    \n",
        "    return (col, value_val)\n",
        "\n",
        "\n",
        "def replace_dict(inp, dicts):\n",
        "    \"\"\"Return inp with every dict key replaced by str(value), in dict order.\"\"\"\n",
        "    result = inp\n",
        "    for pattern, substitute in dicts.items():\n",
        "        result = result.replace(pattern, str(substitute))\n",
        "    return result\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ZMS_H2VeNjZs"
      },
      "source": [
        "##### Get classification label for each column based on SQL templates"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "jQoQYmMaNjZs"
      },
      "source": [
        "STRUCT_KEYWORDS = [\"WHERE\", \"GROUP_BY\", \"HAVING\", \"ORDER_BY\", \"SELECT\"]\n",
        "EXTRA_OPS = [\"NOT_IN\", \"IN\", \"BETWEEN\", \"=\"]\n",
        "COUNT = \"COUNT\"\n",
        "OTHER_KEYWORDS = [\"LIMIT\"] #AGG, OP, DASC, OR, =\n",
        "NEST_KEYWORDS = [\"EXCEPT\", \"UNION\", \"INTERSECT\"]\n",
        "\n",
        "def get_labels(sql_pattern):\n",
        "    sql_tokens = sql_pattern.replace(\"GROUP BY\", \"GROUP_BY\").replace(\"ORDER BY\", \"ORDER_BY\").replace(\"NOT IN\", \"NOT_IN\").split(\" \")\n",
        "    columns = {}\n",
        "    cur_nest = \"\"\n",
        "    cur_struct = \"\"\n",
        "    cur_len = len(sql_tokens)\n",
        "    select_count = 0\n",
        "    for i, tok in enumerate(sql_tokens):\n",
        "        if tok in NEST_KEYWORDS:\n",
        "            if cur_nest == \"\" or cur_nest == \"OP_SEL\":\n",
        "                cur_nest = tok\n",
        "            else:\n",
        "                cur_nest = cur_nest + \" \" + tok\n",
        "        elif tok in STRUCT_KEYWORDS:\n",
        "            cur_struct = tok\n",
        "            if tok == \"SELECT\":\n",
        "                select_count += 1\n",
        "                if select_count > 1 and cur_nest == \"\":\n",
        "                    cur_nest = \"OP_SEL\"\n",
        "        elif \"COLUMN\" in tok or \"*\" == tok:\n",
        "            if tok not in columns.keys():\n",
        "                columns[tok] = []\n",
        "            # SELECT {COLUMN0}\n",
        "            # SELECT {COLUMN0} , {COLUMN1}\n",
        "            # SELECT {AGG0} ( {COLUMN0} )\n",
        "            # SELECT {COLUMN0} {FROM} WHERE {COLUMN1} {OP} ( SELECT {AGG0} ( {COLUMN1} ) {FROM} ) AND {COLUMN2} {OP0} {VALUE0}\n",
        "            if cur_struct == \"SELECT\":\n",
        "                if \",\" == sql_tokens[i-1] or \"SELECT\" == sql_tokens[i-1]:\n",
        "                    columns[tok].append(cur_nest + \" \" + cur_struct)\n",
        "                elif \"(\" == sql_tokens[i-1]:\n",
        "                    columns[tok].append(cur_nest + \" \" + cur_struct + \" \" + sql_tokens[i-2])\n",
        "                else:\n",
        "                    print(\"\\nWarning: unexcepted SELECT format\")\n",
        "                    print(sql_pattern)\n",
        "            # WHERE {COLUMN} {OP}\n",
        "            # WHERE {COLUMN2} {OP0}\n",
        "            # WHERE OR {COLUMN2} {OP0}\n",
        "            # WHERE {COLUMN2} BETWEEN\n",
        "            elif cur_struct == \"WHERE\":\n",
        "                assert \"OP\" in sql_tokens[i+1] or sql_tokens[i+1] in EXTRA_OPS\n",
        "                last_tok = sql_tokens[i-1]\n",
        "                if \"OR\" == last_tok or (i+3 < cur_len and \"OR\" == sql_tokens[i+3]):\n",
        "                    columns[tok].append(cur_nest + \" \" + cur_struct + \" OR \" + sql_tokens[i+1])\n",
        "                elif \"WHERE\" == last_tok or \"AND\" == last_tok:\n",
        "                    columns[tok].append(cur_nest + \" \" + cur_struct + \" \" + sql_tokens[i+1])\n",
        "                else:\n",
        "                    print(\"\\nWarning: unexcepted WHERE format\")\n",
        "            # GROUP BY {COLUMN0} , {COLUMN0}\n",
        "            elif cur_struct == \"GROUP_BY\":\n",
        "                columns[tok].append(cur_nest + \" \" + cur_struct)\n",
        "            # HAVING COUNT ( * ) {OP0}\n",
        "            # HAVING {AGG0} ( {COLUMN2} ) {OP0}\n",
        "            elif cur_struct == \"HAVING\":\n",
        "                last_tok = sql_tokens[i-1]\n",
        "                if last_tok != \"(\" and not (\"AGG\" in sql_tokens[i-2] or COUNT == sql_tokens[i-2]):\n",
        "                    print(\"\\nWarning: unexcepted HAVING format\")\n",
        "                columns[tok].append(cur_nest + \" \" + cur_struct + \" \" + sql_tokens[i-2] + \" \" + sql_tokens[i+2])\n",
        "            # ORDER BY COUNT ( * ) {DASC} LIMIT\n",
        "            # ORDER BY COUNT ( * ) {DASC}\n",
        "            # ORDER BY {COLUMN1} {DASC} LIMIT\n",
        "            # ORDER BY {COLUMN1} LIMIT\n",
        "            # ORDER BY {COLUMN1} , {COLUMN1} {DASC} LIMIT\n",
        "            # ORDER BY {COLUMN1} {DASC} if no DASC then is ASC\n",
        "            elif cur_struct == \"ORDER_BY\":\n",
        "                last_tok = sql_tokens[i-1]\n",
        "                if last_tok == \"(\":\n",
        "                    dasc_tok = \"{DASC}\"\n",
        "                    limit_tok = \"\"\n",
        "                    if sql_tokens[i+2] != \"{DASC}\":\n",
        "                        dasc_tok = \"ASC\"\n",
        "                        if sql_tokens[i+2] == \"LIMIT\":\n",
        "                            limit_tok = \"LIMIT\"\n",
        "                    elif i+3 < cur_len and sql_tokens[i+3] == \"LIMIT\":\n",
        "                        limit_tok = \"LIMIT\"\n",
        "                        \n",
        "                    columns[tok].append(cur_nest + \" \" + cur_struct + \" \" + sql_tokens[i-2] + \" \" + dasc_tok + \" \" + limit_tok)\n",
        "                elif last_tok == \"ORDER_BY\" or last_tok == \",\":\n",
        "                    dasc_tok = \"ASC\"\n",
        "                    limit_tok = \"\"\n",
        "                    # small dirty pass\n",
        "                    if i+1 < cur_len and sql_tokens[i+1] == \"{DASC}\":\n",
        "                        dasc_tok = \"{DASC}\"\n",
        "                        if i+2 < cur_len and sql_tokens[i+2] == \"LIMIT\":\n",
        "                            limit_tok = \"LIMIT\"\n",
        "                    elif i+1 < cur_len and sql_tokens[i+1] == \"LIMIT\":\n",
        "                        limit_tok = \"LIMIT\"\n",
        "                    \n",
        "                    columns[tok].append(cur_nest + \" \" + cur_struct + \" \" + dasc_tok + \" \" + limit_tok)\n",
        "        \n",
        "            else:\n",
        "                print(\"\\n------------Warning: unexcepted COLUMN label format\")\n",
        "    \n",
        "    column_labels = {}\n",
        "    for col, labels in columns.items():\n",
        "        label_str = \" \".join([l.strip() for l in labels])\n",
        "        column_labels[col] = label_str\n",
        "        \n",
        "    return column_labels\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "c98zsP_1NjZu"
      },
      "source": [
        "##### Populate one example for a given database based on a given NL-SQL template and SQL component mapping"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "XtU3L09fNjZu"
      },
      "source": [
        "def populate_one(db, templates, templates_one, sql_components):\n",
        "    \"\"\"Populate one (SQL, question, column-labels) example for database `db`.\n",
        "\n",
        "    Picks a multi-table template when the db has more than one table,\n",
        "    otherwise a single-table template, then fills every SQL/question slot\n",
        "    (columns, comparison ops, aggregates, values, ASC/DESC) with concrete\n",
        "    values drawn from the db tables.\n",
        "\n",
        "    Template constraint strings seen in the data (exact semantics live in\n",
        "    process_constraints):\n",
        "        'P0=P1==', 'P0=P1=P2==', 'P0==', 'P1==', 'P0=>', 'P0=<',\n",
        "        '{AGG0}=MAX', '{AGG0}=MIN',\n",
        "        'T0-T1-JOIN', 'T0-T1-NO-JOIN',\n",
        "        'C0-id', 'C2-id', 'C1-id', 'C3-T1'\n",
        "\n",
        "    Returns:\n",
        "        (sql_gen, question_gen, column_lables_real) - the generated SQL\n",
        "        string, the generated natural-language question, and a dict mapping\n",
        "        real column names to their SQL-structure label strings.\n",
        "    \"\"\"\n",
        "    if len(db) > 1:\n",
        "        template = random.choice(templates)\n",
        "    else:\n",
        "        template = random.choice(templates_one)\n",
        "        \n",
        "    sql_constraints = template['SQL constraints']\n",
        "    sql_pattern = template[\"SQL pattern\"]\n",
        "    question, q_constraints = random.choice(template[\"questions\"])\n",
        "    constraints = list(set(sql_constraints + q_constraints))\n",
        "\n",
        "    slots, columns, ops, vals, aggs, dasc = get_sql_slots(sql_pattern)\n",
        "    slot_values, columns, skip_db_with_one_table = process_constraints(constraints, columns, slots)\n",
        "\n",
        "    q_slots = get_q_slots(question)\n",
        "    q_slot_values = {}   \n",
        "\n",
        "    # 1 process ops - update columns and values constraints\n",
        "    for op, colv in ops.items():\n",
        "        # colv[0] == \"*\": the op is not tied to a concrete column; pick any operator\n",
        "        if colv[0] == \"*\":\n",
        "            if op not in slot_values.keys():\n",
        "                op_val = random.choice([\">\", \"<\", \">=\", \"<=\", \"=\"])\n",
        "                slot_values[op] = op_val\n",
        "                if len(colv) == 2:\n",
        "                    slot_values[colv[1]] = random.randint(1, 10)\n",
        "        else:\n",
        "            if colv[0] not in columns.keys():\n",
        "                print(\"\\n-----colv[0] not in columns.keys(): \")\n",
        "                print(columns.keys())\n",
        "                print(ops)\n",
        "            assert colv[0] in columns.keys()\n",
        "            if op not in slot_values.keys():\n",
        "                # bias toward \"=\" (40%), otherwise any operator from OPS\n",
        "                if random.random() < 0.4:\n",
        "                    op_val = \"=\"\n",
        "                else:\n",
        "                    op_val = random.choice(OPS)\n",
        "                slot_values[op] = op_val\n",
        "                if op_val in [\">\", \"<\", \">=\", \"<=\"]:\n",
        "                    columns[colv[0]].append(\"number\")\n",
        "            if len(colv) == 2:\n",
        "                columns[colv[0]].append(colv[1])\n",
        "    \n",
        "    # 2 process columns\n",
        "    # Decide which physical tables back T0/T1 and whether later column slots\n",
        "    # come from the second table; table_label_* mark the tables' \"*\" pseudo\n",
        "    # columns with a SELECT label when the question mentions the table.\n",
        "    random.shuffle(db)\n",
        "    table_0, table_1 = None, None\n",
        "    table_label_0 = \"\"\n",
        "    table_label_1 = \"\"\n",
        "    use_table_1 = False\n",
        "    \n",
        "    if \"{COLUMN0}\" in columns.keys() or \"{TABLE0}\" in q_slots:\n",
        "        table_label_0 = \"SELECT\"\n",
        "        \n",
        "    if len(db) >= 2:\n",
        "        table_0, table_1 = db[:2]\n",
        "        if \"{TABLE1}\" in q_slots:\n",
        "            table_label_1 = \"SELECT\"\n",
        "            if \"{TABLE0}\" in q_slots:\n",
        "                # p<0.5 from T0, T1 AND to SELECT T1 *\n",
        "                # otherwise all from T0 AND to SELECT T1 *\n",
        "                if random.random() < 0.5:\n",
        "                    use_table_1 = True                 \n",
        "            else:\n",
        "                # p<0.6: draw later columns from T1 and label T1 * as SELECT;\n",
        "                # otherwise all columns come from T0\n",
        "                if random.random() < 0.6:\n",
        "                    use_table_1 = True\n",
        "                    if \"{COLUMN1}\" in columns.keys():\n",
        "                        table_label_1 = \"SELECT\"\n",
        "        else:\n",
        "            # p<0.5 from T0, T1 AND to SELECT T1 *\n",
        "            # otherwise all from T0, NOT to SELECT T1 *\n",
        "            if random.random() < 0.5:\n",
        "                use_table_1 = True\n",
        "                if \"{COLUMN1}\" in columns.keys():\n",
        "                    table_label_1 = \"SELECT\"\n",
        "    else:\n",
        "        # single-table db: the same table backs both T0 and T1\n",
        "        table_0, table_1 = db[0], db[0]\n",
        "    \n",
        "    T0 = table_0[\"name\"]\n",
        "    T1 = table_1[\"name\"]\n",
        "    # skip the leading pseudo column (index 0 is the table's \"*\" column; it is\n",
        "    # used below via table_0[\"columns\"][0] for the table-level SELECT label)\n",
        "    columns_inf_0 = list(zip(table_0[\"columns\"], table_0[\"column_types\"], table_0[\"values\"]))[1:]\n",
        "    if use_table_1:\n",
        "        columns_inf_1 = list(zip(table_1[\"columns\"], table_1[\"column_types\"], table_1[\"values\"]))[1:]\n",
        "        \n",
        "    if \"{COLUMN0}\" in columns.keys():\n",
        "        col_0, value_0 = gen_col_info(\"{COLUMN0}\", columns, columns_inf_0)\n",
        "        slot_values[\"{COLUMN0}\"] = col_0\n",
        "        if value_0 is not None:\n",
        "            for k, v in value_0:\n",
        "                slot_values[k] = v\n",
        "        # remove the used column so later slots pick a different one (only when\n",
        "        # enough candidates remain)\n",
        "        if len(columns_inf_0) > 2:\n",
        "            columns_inf_0 = [(col, ctype, val) for col, ctype, val in columns_inf_0 if col != col_0]\n",
        "\n",
        "    if use_table_1:\n",
        "        columns_input = columns_inf_1\n",
        "    else:\n",
        "        columns_input = columns_inf_0\n",
        "            \n",
        "    if \"{COLUMN1}\" in columns.keys():\n",
        "        col_1, value_1 = gen_col_info(\"{COLUMN1}\", columns, columns_input)\n",
        "        slot_values[\"{COLUMN1}\"] = col_1\n",
        "        if value_1 is not None:\n",
        "            for k, v in value_1:\n",
        "                slot_values[k] = v\n",
        "        columns_input_org = columns_input\n",
        "        if len(columns_input) > 3:\n",
        "            columns_input = [(col, ctype, val) for col, ctype, val in columns_input if col != col_1]\n",
        "        if len(columns_input) < 2:\n",
        "            columns_input = columns_input_org\n",
        "        \n",
        "    if \"{COLUMN2}\" in columns.keys():\n",
        "        col_2, value_2 = gen_col_info(\"{COLUMN2}\", columns, columns_input)\n",
        "        slot_values[\"{COLUMN2}\"] = col_2\n",
        "        if value_2 is not None:\n",
        "            for k, v in value_2:\n",
        "                slot_values[k] = v\n",
        "        columns_input_org = columns_input\n",
        "        if len(columns_input) > 2:\n",
        "            columns_input = [(col, ctype, val) for col, ctype, val in columns_input if col != col_2]\n",
        "        if len(columns_input) < 2:\n",
        "            columns_input = columns_input_org\n",
        "                \n",
        "    if \"{COLUMN3}\" in columns.keys():\n",
        "        col_3, value_3 = gen_col_info(\"{COLUMN3}\", columns, columns_input)\n",
        "        slot_values[\"{COLUMN3}\"] = col_3\n",
        "        if value_3 is not None:\n",
        "            for k, v in value_3:\n",
        "                slot_values[k] = v\n",
        "        \n",
        "    # 3 aggs\n",
        "    for agg in aggs.keys():\n",
        "        if agg not in slot_values.keys():\n",
        "            slot_values[agg] = random.choice([\"MAX\", \"MIN\", \"SUM\", \"AVG\"])\n",
        "    # 4 values\n",
        "    # NUM: LIMIT value, reused below when phrasing SC_* question slots\n",
        "    NUM = 1\n",
        "    for val, cond in vals.items():\n",
        "        assert val not in slot_values.keys()\n",
        "        if cond == \"integer\":\n",
        "            if random.random() < 0.5:\n",
        "                slot_values[val] = 1\n",
        "            else:\n",
        "                NUM = random.randint(2, 10)\n",
        "                slot_values[val] = NUM\n",
        "        else:\n",
        "            slot_values[val] = random.randint(0, 100)\n",
        "                    \n",
        "    # 5 dasc - true\n",
        "    if dasc == True:\n",
        "        slot_values[\"{DASC}\"] = random.choice([\"ASC\", \"DESC\"])\n",
        "    \n",
        "    # 6 check if all sql slot values are done\n",
        "    if len(slots) != len(slot_values):\n",
        "        print(\"\\nlen(slots) != len(slot_values)\")\n",
        "        print(\"sql_pattern: \", sql_pattern)\n",
        "        print(\"slots: \", slots)\n",
        "        print(\"slot_values: \", slot_values.keys())\n",
        "    assert len(slots) == len(slot_values)\n",
        "    \n",
        "    # 7 for the questions slots:\n",
        "    for qs in q_slots:\n",
        "        if qs == \"{TABLE0}\":\n",
        "            q_slot_values[\"{TABLE0}\"] = T0\n",
        "        elif qs == \"{TABLE1}\":\n",
        "            q_slot_values[\"{TABLE1}\"] = T1\n",
        "        elif \"SC\" in qs:\n",
        "            sc = slot_values[\"{DASC}\"]\n",
        "            if \"SC\" == qs:\n",
        "                q_slot_values[qs] = random.choice(sql_components[\"SC\"][sc])\n",
        "            elif \"SC_COL_LIMIT\" == qs:\n",
        "                if NUM > 1:\n",
        "                    sc =  sc + \"_NUM\"\n",
        "                    q_slot_values[qs] = random.choice(sql_components[\"SC_COL_LIMIT\"][sc]).replace(\"[NUM]\", str(NUM))\n",
        "                else:\n",
        "                    q_slot_values[qs] = random.choice(sql_components[\"SC_COL_LIMIT\"][sc])\n",
        "            elif \"SC_COL_COUNT_LIMIT\" in qs:\n",
        "                sc_type = qs.replace(\"SC_COL_COUNT_LIMIT\", \"\")\n",
        "                if NUM > 1:\n",
        "                    sc =  sc + \"_NUM\" + sc_type\n",
        "                    q_slot_values[qs] = random.choice(sql_components[\"SC_COL_COUNT_LIMIT\"][sc]).replace(\"[NUM]\", str(NUM))\n",
        "                else:\n",
        "                    sc =  sc + sc_type\n",
        "                    q_slot_values[qs] = random.choice(sql_components[\"SC_COL_COUNT_LIMIT\"][sc])\n",
        "            else:\n",
        "                if \"-\" not in qs:\n",
        "                    print(\"qs wrong\", qs)\n",
        "                assert \"-\" in qs\n",
        "                # NOTE(review): if qs names neither C1 nor C2, sc_col would be\n",
        "                # unbound on the next use - templates presumably guarantee one\n",
        "                # of them; confirm before editing templates.\n",
        "                if \"C1\" in qs:\n",
        "                    sc_col = slot_values[\"{COLUMN1}\"]\n",
        "                elif \"C2\" in qs:\n",
        "                    sc_col = slot_values[\"{COLUMN2}\"]\n",
        "                q_slot_values[qs] = random.choice(sql_components[\"SC_COL\"][sc]).replace(\"[COL]\", sc_col)\n",
        "        else:\n",
        "            if qs not in slot_values.keys():\n",
        "                print(\"qs not in sv: \", qs)\n",
        "                print(\"sql_pattern: \", sql_pattern)\n",
        "                print(\"slot_values: \", slot_values)\n",
        "            assert qs in slot_values.keys()\n",
        "            if \"OP\" in qs:\n",
        "                q_slot_values[qs] = random.choice(sql_components[\"OP\"][slot_values[qs]])\n",
        "            elif \"AGG\" in qs:\n",
        "                q_slot_values[qs] = random.choice(sql_components[\"AGG\"][slot_values[qs]])\n",
        "            elif \"COLUMN\" in qs:\n",
        "                # keep words 1..5 of the column string (word 0 is presumably the\n",
        "                # table-name prefix - confirm against write_final_file's stripping)\n",
        "                q_slot_values[qs] = \" \".join(slot_values[qs].split(\" \")[1:6])\n",
        "            elif \"VALUE\" in qs:\n",
        "                q_slot_values[qs] = \" \".join(str(slot_values[qs]).split(\" \")[:5])\n",
        "            else:\n",
        "                print(\"\\nWarning: some q slot type not considered!\")\n",
        "                print(qs)\n",
        "    \n",
        "    # 8 check if all question slots are processed\n",
        "    assert len(q_slots) == len(q_slot_values)\n",
        "    \n",
        "    # 9 generate final SQL-question pair\n",
        "    question_gen = replace_dict(question, q_slot_values)\n",
        "    \n",
        "    \n",
        "    # 10 generate column labels\n",
        "    slot_values_new = {}\n",
        "    for sl, vl in slot_values.items():\n",
        "        if \"COLUMN\" in sl:\n",
        "            # glue multi-word column names with \"_=_\" (presumably so replace_dict\n",
        "            # keeps them as a single SQL token; verify against replace_dict)\n",
        "            slot_values_new[sl] = \"_=_\".join(vl.split(\" \"))\n",
        "        else:\n",
        "            slot_values_new[sl] = vl\n",
        "            \n",
        "    column_labels = get_labels(sql_pattern)\n",
        "    column_lables_real = {}\n",
        "    for col, label in column_labels.items():\n",
        "        if col != \"*\":\n",
        "            col = slot_values[col]\n",
        "        for slot, value in slot_values.items():\n",
        "            label = label.replace(slot, str(value))\n",
        "        column_lables_real[col] = label\n",
        "    \n",
        "    # also add labels for table column * \n",
        "    if table_label_0 != \"\":\n",
        "        column_lables_real[table_0[\"columns\"][0]] = table_label_0\n",
        "    if table_label_1 != \"\":\n",
        "        column_lables_real[table_1[\"columns\"][0]] = table_label_1\n",
        "\n",
        "    sql_gen = replace_dict(sql_pattern.replace(\" {FROM}\", \"\"), slot_values_new)\n",
        "    \n",
        "    return (sql_gen, question_gen, column_lables_real)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "wFbIs5uPNjZv"
      },
      "source": [
        "##### Generate examples for all databases"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "JtglmS1HNjZv"
      },
      "source": [
        "# let's start data augmentation!\n",
        "def augment_db(db, templates, templates_one_table, sql_components, aug_limit):\n",
        "    \"\"\"Generate `aug_limit` (question, SQL, column-labels) triples for one db.\n",
        "\n",
        "    Each triple comes from populate_one; the SQL/question order is swapped\n",
        "    so downstream consumers receive (question, sql, labels).\n",
        "    \"\"\"\n",
        "    augment_pairs = []\n",
        "    for _ in range(aug_limit):\n",
        "        sql_gen, question_gen, column_lables = populate_one(db, templates, templates_one_table, sql_components)\n",
        "        augment_pairs.append((question_gen, sql_gen, column_lables))\n",
        "    return augment_pairs\n",
        "    \n",
        "\n",
        "def augment_all_dbs(dbs, templates, templates_one_table, sql_components, aug_limit):\n",
        "    \"\"\"Run augment_db over every database.\n",
        "\n",
        "    Keys each database's examples by a serialized schema string: the column\n",
        "    names joined with \" </s> \", then \" |-| \", then the column values joined\n",
        "    the same way (a leading \"*\" column with an empty value is prepended).\n",
        "    \"\"\"\n",
        "    augment_data = {}\n",
        "    for idx, db in enumerate(dbs):\n",
        "        if idx % 10000 == 0:\n",
        "            print(\"processed: \", idx)\n",
        "        db_cols = [\"*\"]\n",
        "        db_values = [\"\"]\n",
        "        for tab in db:\n",
        "            db_cols.extend(tab[\"columns\"])\n",
        "            db_values.extend(tab[\"values\"])\n",
        "        schema_str = \" </s> \".join(db_cols) + \" |-| \" + \" </s> \".join(str(v) for v in db_values)\n",
        "        augment_data[schema_str] = augment_db(db, templates, templates_one_table, sql_components, aug_limit)\n",
        "    return augment_data"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "scrolled": true,
        "id": "i6odlYkSNjZv"
      },
      "source": [
        "# Generate 5 augmented (question, SQL, column-labels) examples per web-table db.\n",
        "augment_data_webtable = augment_all_dbs(webtable_dbs, templates, templates_one_table, sql_components, 5)"
      ],
      "execution_count": 3,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "4KUEGzJoNjZv"
      },
      "source": [
        "##### Map SQL labels of all augmented examples into numeric labels"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "11AlIs5TNjZv"
      },
      "source": [
        "### process label prints for each column\n",
        "def get_label_map(data):\n",
        "    label_dict = defaultdict(int)\n",
        "    for schema_str, example_list in data.items():\n",
        "        for example in example_list:\n",
        "            (question, sql, col_labels) = example\n",
        "            for val in col_labels.values():\n",
        "                label_dict[val] += 1\n",
        "    label_list = sorted(label_dict.items(), key=lambda kv: kv[1], reverse=True)\n",
        "    label_map = {}\n",
        "    count = 1\n",
        "    for label, _ in label_list:\n",
        "        label_map[label] = count\n",
        "        count += 1\n",
        "    \n",
        "    return label_map\n",
        "\n",
        "def map_labels(data, label_map, is_dev=False):\n",
        "    data_new = {}\n",
        "    skip_count = 0\n",
        "    count = 0\n",
        "    for schema_str, exs in data.items():\n",
        "        count += 1\n",
        "        if count % 100000 == 0:\n",
        "            print(\"processed: \", count)\n",
        "        data_new[schema_str] = []\n",
        "        for ex in exs:\n",
        "            skip = False\n",
        "            label_dict = ex[2]\n",
        "            label_dict_new = {}\n",
        "            for col, label in label_dict.items():\n",
        "                if label in label_map.keys():\n",
        "                    label_dict_new[col] = label_map[label]\n",
        "                else:\n",
        "                    skip = True\n",
        "                    skip_count += 1\n",
        "                    #else just skip\n",
        "#             print(label_dict_new)\n",
        "            if not skip:\n",
        "                data_new[schema_str].append((ex[0], ex[1], ex[2], label_dict_new))   \n",
        "    \n",
        "    print(\"skip_count: \", skip_count)\n",
        "    return data_new"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1H6_IVIkNjZv",
        "outputId": "ec09dd92-b856-4f12-edde-bea2800a40ae"
      },
      "source": [
        "import pickle\n",
        "# Load the frozen label->id map (the commented-out lines show how it was\n",
        "# originally built and saved). NOTE(review): pickle.load executes arbitrary\n",
        "# code for untrusted files - only load label maps you produced yourself.\n",
        "label_map_file = \"data/labels_map.pkl\"\n",
        "# label_map_final = get_label_map(fine_tuning_data_augment_with_dev_wikisql)\n",
        "# with open(label_map_file, 'wb') as fp:\n",
        "#     pickle.dump(label_map_final, fp, protocol=pickle.HIGHEST_PROTOCOL)\n",
        "with open(label_map_file, 'rb') as fp:\n",
        "    label_map = pickle.load(fp)\n",
        "    \n",
        "# Rebinds augment_data_webtable: each kept example gains an int-label dict;\n",
        "# the string-label-only version is no longer available after this cell.\n",
        "augment_data_webtable = map_labels(augment_data_webtable, label_map)"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "skip_count:  0\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "YmUhzNhzNjZw"
      },
      "source": [
        "##### Write and save file"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "tXaEx1PxNjZw"
      },
      "source": [
        "MAX_TOKEN_LEN = 150  # NOTE: overrides the module-level MAX_TOKEN_LEN (189) set in the import cell\n",
        "def write_final_file(augment_data):\n",
        "    \"\"\"Convert augmented (question, sql, labels) examples into final JSON records.\n",
        "\n",
        "    Each record holds the lower-cased question, the (possibly truncated) column\n",
        "    list, one row of example values, and per-column integer labels. Examples\n",
        "    whose question+columns exceed MAX_TOKEN_LEN RoBERTa tokens are dropped.\n",
        "    Uses the module-global `tokenizer`.\n",
        "    \"\"\"\n",
        "    data_json = []\n",
        "    skip_count = 0  # kept only for the summary print; nothing increments it here\n",
        "    line_count = 0\n",
        "    dup_count = 0   # examples where a label key matched more than one column\n",
        "    pro_count = 0\n",
        "    for schema_str, exs in augment_data.items():\n",
        "        for ex in exs:\n",
        "            line_count += 1\n",
        "            if line_count % 100000 == 0:\n",
        "                print(\"processed: \", line_count)\n",
        "            question, sql, label_strs, label_ints = ex\n",
        "            col_str, val_str = schema_str.split(\" |-| \")\n",
        "            colns = col_str.split(\" </s> \")\n",
        "            values = val_str.split(\" </s> \")\n",
        "            assert len(colns) == len(values)\n",
        "            cols = []\n",
        "            label_num = len(label_ints)\n",
        "            label_count = 0\n",
        "            for idx, coln in enumerate(colns):\n",
        "                col = {}\n",
        "                col[\"name\"] = coln\n",
        "                col[\"value\"] = values[idx]\n",
        "                if coln != \"*\":\n",
        "                    # strip the leading table-name token from the column string\n",
        "                    col[\"name\"] = \" \".join(coln.split(\" \")[1:])\n",
        "                col[\"label_int\"] = 0\n",
        "                if coln in label_ints.keys():\n",
        "                    col[\"label_int\"] = label_ints[coln]\n",
        "                    label_count += 1\n",
        "                cols.append(col)\n",
        "            \n",
        "            # every labeled column must appear in the schema; a surplus match\n",
        "            # means two schema columns share the same full name\n",
        "            assert label_count >= label_num\n",
        "            if label_count > label_num:\n",
        "                dup_count += 1\n",
        "            \n",
        "            col_list = []\n",
        "            label_list = []\n",
        "            value_list = []\n",
        "            col_count = 0\n",
        "            for col in cols:\n",
        "                # cap unlabeled columns at 40 to bound the encoder input length\n",
        "                if col_count > 40 and col[\"label_int\"] == 0:\n",
        "                    continue\n",
        "                col_list.append(col[\"name\"])\n",
        "                value_list.append(col[\"value\"])\n",
        "                col_count += 1\n",
        "                label_list.append(int(col[\"label_int\"]))\n",
        "            assert len(col_list) == len(value_list)\n",
        "            \n",
        "            # drop examples that would not fit the encoder input\n",
        "            q_col_str = \"<s> \" + question.lower() + \" </s> \" + \" </s> \".join(col_list).strip() + \" </s> \"\n",
        "            tokens = tokenizer.tokenize(q_col_str)\n",
        "            if len(tokens) > MAX_TOKEN_LEN:\n",
        "                continue\n",
        "                \n",
        "            data_json.append({\"question\": question.lower(),\n",
        "                              \"columns\": col_list,\n",
        "                              \"rows\": [value_list],\n",
        "                              \"column_labels\": label_list\n",
        "                             })\n",
        "            pro_count += 1\n",
        "\n",
        "    print(\"total line: \", line_count)\n",
        "    print(\"skiped line: \", skip_count)\n",
        "    print(\"dup line: \", dup_count)\n",
        "    print(\"pro line: \", pro_count)\n",
        "    \n",
        "    return data_json"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "YU39fFVjNjZw",
        "outputId": "f86ca3fa-925e-4b98-dce6-895af73f45d4"
      },
      "source": [
        "# Build the final JSON records; over-length examples are dropped (see counts below).\n",
        "data_json = write_final_file(augment_data_webtable)"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "processed:  100000\n",
            "processed:  200000\n",
            "processed:  300000\n",
            "processed:  400000\n",
            "total line:  418460\n",
            "skiped line:  0\n",
            "dup line:  2237\n",
            "pro line:  413173\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "mE3FUb-TNjZw",
        "outputId": "d7a0f7ee-604e-4fc2-f4ce-089164a3edf4"
      },
      "source": [
        "# Spot-check one generated record.\n",
        "data_json[2000]"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "{'question': 'find the winner in which the in total prize is ignore $ 11,111.11 .',\n",
              " 'columns': ['*',\n",
              "  'online poker *',\n",
              "  'date',\n",
              "  'event #',\n",
              "  'event',\n",
              "  'winner',\n",
              "  'prize',\n",
              "  'entries',\n",
              "  'prize pool',\n",
              "  'elapsed time'],\n",
              " 'rows': [['',\n",
              "   'all',\n",
              "   'may 1',\n",
              "   '1l',\n",
              "   \"$ 11 no limit hold'em\",\n",
              "   'willrobrobu',\n",
              "   '$ 11,111.11',\n",
              "   '11,111',\n",
              "   '$ 111,110',\n",
              "   '[ 1-day ] event']],\n",
              " 'column_labels': [0, 1, 0, 0, 0, 2, 175, 0, 0, 0]}"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 33
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "t6YVWoACNjZx"
      },
      "source": [
        "# Persist the augmented dataset for downstream fine-tuning.\n",
        "with open('wikitable_augment.json', 'w') as outfile:\n",
        "    json.dump(data_json, outfile)"
      ],
      "execution_count": null,
      "outputs": []
    }
  ]
}