{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Mount Google Drive (Colab) so the dataset and output paths used below resolve.\n",
    "from google.colab import drive\n",
    "drive.mount('/content/drive')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tqdm import tqdm \n",
    "import pandas as pd\n",
    "import torch\n",
    "\n",
    "import nltk\n",
    "import re\n",
    "\n",
    "nltk.download('wordnet')\n",
    "nltk.download('universal_tagset')\n",
    "nltk.download('averaged_perceptron_tagger')\n",
    "\n",
    "from nltk.corpus import wordnet as wn\n",
    "\n",
    "import random\n",
    "\n",
    "import nltk\n",
    "nltk.download('stopwords')\n",
    "from nltk.corpus import stopwords\n",
    "\n",
    "nltk.download('punkt')\n",
    "from nltk.tokenize import word_tokenize \n",
    "\n",
    "\n",
    "def get_word_count(text):\n",
    "  \"\"\"Count the tokens of `text` that contain at least one ASCII letter or digit.\n",
    "\n",
    "  Pure-punctuation tokens are excluded from the count.\n",
    "  \"\"\"\n",
    "  has_alnum = re.compile('.*[A-Za-z0-9].*')  # token must contain a letter or digit\n",
    "  return sum(1 for tok in word_tokenize(text) if has_alnum.match(tok))\n",
    "\n",
    "# English stopword set; passed to synonym_replacement below, which currently\n",
    "# does not use it (the stopword filter there is commented out).\n",
    "stop_words = set(stopwords.words('english'))\n",
    "\n",
    "\n",
    "def get_synonyms(word):\n",
    "    \"\"\"Return WordNet synonyms for a (token, universal-POS-tag) pair.\n",
    "\n",
    "    Only NOUN/VERB/ADJ/ADV tokens are looked up; every other tag yields [].\n",
    "    Lemma names are lowercased, '_'/'-' become spaces, and characters outside\n",
    "    [a-z ] are stripped. The token itself and empty strings are excluded.\n",
    "    \"\"\"\n",
    "    pos_map = {'NOUN': wn.NOUN, 'VERB': wn.VERB, 'ADV': wn.ADV, 'ADJ': wn.ADJ}\n",
    "    pos = pos_map.get(word[1])\n",
    "    if pos is None:\n",
    "        # Word class not considered for synonym replacement.\n",
    "        return []\n",
    "\n",
    "    allowed = set(' qwertyuiopasdfghjklzxcvbnm')\n",
    "    synonyms = set()\n",
    "    for syn in wn.synsets(word[0], pos=pos):\n",
    "        for lemma in syn.lemmas():\n",
    "            name = lemma.name().replace('_', ' ').replace('-', ' ').lower()\n",
    "            name = ''.join(ch for ch in name if ch in allowed)\n",
    "            if name:  # bug fix: never offer an empty string as a synonym\n",
    "                synonyms.add(name)\n",
    "\n",
    "    # Bug fix: lemma names were lowercased above, so remove the original token\n",
    "    # case-insensitively (the old `synonyms.remove(word[0])` missed capitalized\n",
    "    # tokens, leaving a word as its own \"synonym\").\n",
    "    synonyms.discard(word[0].lower())\n",
    "    return list(synonyms)\n",
    "\n",
    "\n",
    "def synonym_replacement(words, n, stop_words):\n",
    "    fail_count=0\n",
    "    #words = words.split()\n",
    "    \n",
    "    new_words = words.copy()\n",
    "    random_word_list = []\n",
    "    for word in words:\n",
    "      if word[1] in [\"NOUN\", \"ADJ\", \"ADV\", \"VERB\"]:\n",
    "        random_word_list.append(word)\n",
    "    #random_word_list = list(set([word for word in words if word not in stop_words]))\n",
    "    random.shuffle(random_word_list)\n",
    "    num_replaced = 0\n",
    "    \n",
    "    for random_word in random_word_list:\n",
    "      synonyms = get_synonyms(random_word)\n",
    "        \n",
    "      if len(synonyms) >= 1:\n",
    "          synonym = random.choice(synonyms)\n",
    "          new_words = [synonym if word == random_word else word[0] for word in words]\n",
    "          num_replaced += 1\n",
    "        \n",
    "      else:\n",
    "        new_words = [word[0] for word in words]  ##no possible synonyms, so just use old words as new\n",
    "        \n",
    "      if num_replaced >= n: #only replace up to n words\n",
    "          break\n",
    "\n",
    "    try:\n",
    "      sentence = ' '.join(new_words)\n",
    "    except TypeError as e:\n",
    "      print(e)\n",
    "      print('new_words: ',new_words)\n",
    "\n",
    "      old_words = [word[0] for word in words]\n",
    "      sentence = ' '.join(old_words)\n",
    "      fail_count+=1\n",
    "\n",
    "    return sentence\n",
    "\n",
    "\n",
    "\n",
    "def augment_dataset(data_frame, output_filepath, percentage=0.2):\n",
    "  \"\"\"Write a synonym-augmented copy of `data_frame` to a JSONL file.\n",
    "\n",
    "  Each article's text is split on '.', and roughly `percentage` of the\n",
    "  countable words in every sentence are replaced with WordNet synonyms.\n",
    "  The output JSONL has one record per article with keys\n",
    "  ['text', 'text_perturb', 'label'].\n",
    "\n",
    "  Args:\n",
    "      data_frame: DataFrame with at least 'text' and 'label' columns.\n",
    "      output_filepath: destination path for the JSONL file.\n",
    "      percentage: fraction of each sentence's words to replace.\n",
    "\n",
    "  Returns:\n",
    "      The augmented DataFrame that was written to disk.\n",
    "  \"\"\"\n",
    "  augmented_dataset = []\n",
    "\n",
    "  # total= lets tqdm render a proper progress bar for the itertuples generator.\n",
    "  for article in tqdm(data_frame.itertuples(), total=len(data_frame)):\n",
    "    body_text = article.text\n",
    "    # Bug fix: pandas stores missing text as NaN (a float), which the original\n",
    "    # `is None` check let through and then crashed on .split('.').\n",
    "    if not isinstance(body_text, str):\n",
    "      continue\n",
    "\n",
    "    new_sents = []\n",
    "    for sent in body_text.split('.'):\n",
    "      word_count = get_word_count(sent)\n",
    "      n = int(word_count * percentage)\n",
    "      sent_with_pos = nltk.pos_tag(word_tokenize(sent), tagset='universal')\n",
    "      new_sents.append(synonym_replacement(sent_with_pos, n, stop_words))\n",
    "\n",
    "    augmented_article = '. '.join(new_sents)\n",
    "    augmented_dataset.append([article.text, augmented_article, article.label])\n",
    "\n",
    "  augmented_frame = pd.DataFrame(augmented_dataset, columns=['text', 'text_perturb', 'label'])\n",
    "\n",
    "  with open(output_filepath, 'w') as text_file:\n",
    "    text_file.write(augmented_frame.to_json(orient='records', lines=True))\n",
    "\n",
    "  return augmented_frame"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "data_path = \"google-drive-path-to-directory-containing-your-jsonl-files\"\n",
    "\n",
    "## how our files are stored: each of the train/valid/test jsonl files is split into 'real' (i.e., human-written) & 'fake' (i.e., AI-generated) samples in separate files.\n",
    "## each file is a jsonl file; one json object per line, with keys ['text', 'label']\n",
    "\n",
    "real_train = pd.read_json(data_path + \"real.train.jsonl\", lines=True, orient=\"records\")\n",
    "real_test = pd.read_json(data_path + \"real.test.jsonl\", lines=True, orient=\"records\")\n",
    "real_valid = pd.read_json(data_path + \"real.valid.jsonl\", lines=True, orient=\"records\")\n",
    "\n",
    "fake_train = pd.read_json(data_path + \"fake.train.jsonl\", lines=True, orient=\"records\")\n",
    "fake_test = pd.read_json(data_path + \"fake.test.jsonl\", lines=True, orient=\"records\")\n",
    "fake_valid = pd.read_json(data_path + \"fake.valid.jsonl\", lines=True, orient=\"records\")\n",
    "\n",
    "path = \"google-drive-path-to-directory-to-store-augmented-files\"\n",
    "\n",
    "## augmented jsonl files will now have 3 keys: ['text', 'text_perturb', 'label']\n",
    "\n",
    "# Augment every split with 10% synonym replacement; a single loop replaces the\n",
    "# six copy-pasted augment_dataset calls.\n",
    "splits = {\n",
    "    'real.train': real_train, 'real.test': real_test, 'real.valid': real_valid,\n",
    "    'fake.train': fake_train, 'fake.test': fake_test, 'fake.valid': fake_valid,\n",
    "}\n",
    "for split_name, frame in splits.items():\n",
    "    out_path = path + 'augmented_' + split_name + '.jsonl'\n",
    "    augment_dataset(frame, output_filepath=out_path, percentage=0.1)"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
