{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Data Collection\n",
    "\n",
    "1. Define Classes & Functions <br>\n",
    "    a. Get list of SEC docs for CIK<br>\n",
    "    b. Extract text, date, item numbers from each link<br>\n",
    "    c. Get price given ticker, data from AlphaVantage<br>\n",
    "    d. Get movement given ticker, date<br>\n",
    "    e. Get index movement<br>\n",
    "    f. Check if date is a weekday, and if necessary, adjust to Friday before<br>\n",
    "    g. Calculate dates for month before, quarter before, year before for historical movement calculations\n",
    "2. GET S&P 500 company info<br>\n",
    "3. Get list of 8K doc links <br>\n",
    "4. Download 8Ks & Stock Movements<br>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from bs4 import BeautifulSoup\n",
    "import datetime\n",
    "import unicodedata\n",
    "import requests\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from time import sleep\n",
    "import math\n",
    "from config import Config\n",
    "import dateutil.relativedelta\n",
    "import pandas_market_calendars as mcal\n",
    "import os\n",
    "import io\n",
    "import re\n",
    "from tqdm import tqdm\n",
    "import gc\n",
    "import ast"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1 Define Functions and Classes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from bs4 import BeautifulSoup\n",
    "import datetime\n",
    "import unicodedata\n",
    "import requests\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from time import sleep\n",
    "import re\n",
    "\n",
    "class SEC_Extractor:\n",
    "    def get_doc_links(cik,ticker):\n",
    "        try:\n",
    "            base_url = \"https://www.sec.gov/cgi-bin/browse-edgar\"\n",
    "            inputted_cik = cik\n",
    "            payload = {\n",
    "                \"action\" : \"getcompany\",\n",
    "                \"CIK\" : inputted_cik,\n",
    "                \"type\" : \"8-K\",\n",
    "                \"output\":\"xml\",\n",
    "                \"dateb\" : \"20180401\",\n",
    "            }\n",
    "            sec_response = requests.get(url=base_url,params=payload)\n",
    "            soup = BeautifulSoup(sec_response.text,'lxml')\n",
    "            url_list = soup.findAll('filinghref')\n",
    "            html_list = []\n",
    "            # Get html version of links\n",
    "            for link in url_list:\n",
    "                link = link.string\n",
    "                if link.split(\".\")[len(link.split(\".\"))-1] == 'htm':\n",
    "                    txtlink = link + \"l\"\n",
    "                    html_list.append(txtlink)\n",
    "\n",
    "            doc_list = []\n",
    "            doc_name_list = []\n",
    "            # Get links for txt versions of files\n",
    "            for k in range(len(html_list)):\n",
    "                txt_doc = html_list[k].replace(\"-index.html\",\".txt\")\n",
    "                doc_name = txt_doc.split(\"/\")[-1]\n",
    "                doc_list.append(txt_doc)\n",
    "                doc_name_list.append(doc_name)\n",
    "                # Create dataframe of CIK, doc name, and txt link\n",
    "            df = pd.DataFrame(\n",
    "                {\n",
    "                \"cik\" : [cik]*len(html_list),\n",
    "                \"ticker\" : [ticker]*len(html_list),\n",
    "                \"txt_link\" : doc_list,\n",
    "                \"doc_name\": doc_name_list\n",
    "                }\n",
    "            )\n",
    "        except requests.exceptions.ConnectionError:\n",
    "                sleep(.1)\n",
    "        return df\n",
    "\n",
    "    # Extracts text and submission datetime from document link\n",
    "    def extract_text(link):\n",
    "        try:\n",
    "            r = requests.get(link)\n",
    "            #Parse 8-K document\n",
    "            filing = BeautifulSoup(r.content,\"html5lib\",from_encoding=\"ascii\")\n",
    "            #Extract datetime\n",
    "            try:\n",
    "                submission_dt = filing.find(\"acceptance-datetime\").string[:14]\n",
    "            except AttributeError:\n",
    "                    # Flag docs with missing data as May 1 2018 10AM\n",
    "                submission_dt = \"20180501100000\"\n",
    "            \n",
    "            submission_dt = datetime.datetime.strptime(submission_dt,\"%Y%m%d%H%M%S\")\n",
    "            #Extract HTML sections\n",
    "            for section in filing.findAll(\"html\"):\n",
    "                try:\n",
    "                    #Remove tables\n",
    "                    for table in section(\"table\"):\n",
    "                        table.decompose()\n",
    "                    #Convert to unicode\n",
    "                    section = unicodedata.normalize(\"NFKD\",section.text)\n",
    "                    section = section.replace(\"\\t\",\" \").replace(\"\\n\",\" \").replace(\"/s\",\" \").replace(\"\\'\",\"'\")            \n",
    "                except AttributeError:\n",
    "                    section = str(section.encode('utf-8'))\n",
    "            filing = \"\".join((section))\n",
    "        except requests.exceptions.ConnectionError:\n",
    "                sleep(10)\n",
    "        sleep(.1)\n",
    "\n",
    "        return filing, submission_dt\n",
    "\n",
    "    def extract_item_no(document):\n",
    "        pattern = re.compile(\"Item+ +\\d+[\\:,\\.]+\\d+\\d\")\n",
    "        item_list = re.findall(pattern,document)\n",
    "        return item_list\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Returns Dataframe of document links for a given CIK\n",
    "idx = pd.Index\n",
    "class FinDataExtractor:\n",
    "    def __init__(self):\n",
    "        # S&P 500 index data downloaded from Yahoo Finance GSPC\n",
    "        self.gspc_df = pd.read_csv(\"Data/Indexes/gspc.csv\",parse_dates=['Date'],index_col=\"Date\")\n",
    "        # Get VIX index data downloaded from Yahoo Finance\n",
    "        self.vix_df = pd.read_csv(\"Data/Indexes/vix.csv\",parse_dates=['Date'],index_col=\"Date\")\n",
    "        nyse = mcal.get_calendar('NYSE')\n",
    "        self.nyse_holidays = nyse.holidays().holidays\n",
    "        self.all_tickers_data = pd.read_pickle(\"Pickles/all_tickers_data.pkl\")\n",
    "        \n",
    "#Takes datetime object and ticker string, returns price (opening or closing)\n",
    "    def get_historical_movements(self,row,period):\n",
    "        ticker,release_date = row[0],row[1]\n",
    "\n",
    "       #1 Week\n",
    "        if period == \"week\":\n",
    "            e_start = release_date + datetime.timedelta(weeks=-1)\n",
    "            b_start = e_start\n",
    "\n",
    "            e_end = release_date + dateutil.relativedelta.relativedelta(days=-1)\n",
    "            b_end = e_end\n",
    "\n",
    "         #1 Month    \n",
    "        elif period == \"month\":\n",
    "            e_start = release_date + dateutil.relativedelta.relativedelta(months=-1)\n",
    "            b_start = e_start + dateutil.relativedelta.relativedelta(days=-5)\n",
    "\n",
    "            e_end = release_date + dateutil.relativedelta.relativedelta(days=-1)\n",
    "            b_end = release_date + dateutil.relativedelta.relativedelta(days=-6)\n",
    "\n",
    "        #1 Quarter\n",
    "        elif period == \"quarter\":\n",
    "            e_start = release_date + dateutil.relativedelta.relativedelta(months=-3)\n",
    "            b_start = e_start + dateutil.relativedelta.relativedelta(days=-10)\n",
    "\n",
    "            e_end = release_date + dateutil.relativedelta.relativedelta(days=-1)\n",
    "            b_end = release_date + dateutil.relativedelta.relativedelta(days=-11)\n",
    "\n",
    "        #1 Year\n",
    "        elif period == \"year\":\n",
    "            e_start = release_date + dateutil.relativedelta.relativedelta(years=-1)\n",
    "            b_start = e_start + dateutil.relativedelta.relativedelta(days=-20)\n",
    "\n",
    "            e_end = release_date + dateutil.relativedelta.relativedelta(days=-1)\n",
    "            b_end = release_date + dateutil.relativedelta.relativedelta(days=-21)\n",
    "        else:\n",
    "            raise KeyError\n",
    "\n",
    "        e_start = self.weekday_check(e_start)\n",
    "        b_start = self.weekday_check(b_start)\n",
    "        e_end = self.weekday_check(e_end)\n",
    "        b_end = self.weekday_check(b_end)\n",
    "\n",
    "        start_price = self.get_av_data(ticker=ticker,start_date = b_start, end_date = e_start)\n",
    "        end_price = self.get_av_data(ticker=ticker,start_date = b_end, end_date = e_end)\n",
    "        stock_change = self.calculate_pct_change(end_price,start_price)\n",
    "\n",
    "        start_index = self.get_index_price(start_date = b_start, end_date = e_start)\n",
    "        end_index = self.get_index_price(start_date = e_start, end_date = e_end)\n",
    "        index_change =  self.calculate_pct_change(end_index,start_index)\n",
    "\n",
    "        normalized = stock_change - index_change\n",
    "        return normalized\n",
    "\n",
    "    def get_av_data(self,ticker,start_date,end_date,market_open=False):\n",
    "        start_date = start_date.date()\n",
    "        end_date = end_date.date()\n",
    "\n",
    "        try:\n",
    "            if market_open == False:\n",
    "                price = self.all_tickers_data.xs(ticker,0).loc[end_date:start_date,\"adjusted_close\"].mean()\n",
    "            else:\n",
    "                price = self.all_tickers_data.xs(ticker,0).loc[end_date:start_date,\"open\"].mean()\n",
    "        except (KeyError,IndexError):\n",
    "            price = np.nan\n",
    "        return price\n",
    "\n",
    "    # Takes ticker, 8K release date, checks time of release and then calculate before and after price change\n",
    "    def get_change(self,row):\n",
    "        release_date = row['release_date']\n",
    "        ticker = row['ticker']\n",
    "        market_close = release_date.replace(hour=16,minute=0,second=0)\n",
    "        market_open = release_date.replace(hour=9,minute=30,second=0)\n",
    "\n",
    "    # If report is released after market hours, take change of start date close and release date open\n",
    "        if release_date > market_close:\n",
    "            start_date = release_date\n",
    "            end_date = release_date + datetime.timedelta(days=1)\n",
    "            end_date = self.weekday_check(end_date)\n",
    "\n",
    "            price_before_release = self.get_av_data(ticker,start_date,start_date,market_open=False)\n",
    "            price_after_release = self.get_av_data(ticker,end_date,end_date,market_open=True)\n",
    "\n",
    "            index_before_release = self.get_index_price(start_date,start_date,market_open=False)\n",
    "            index_after_release = self.get_index_price(end_date,end_date,market_open=True)\n",
    "\n",
    "            try:\n",
    "                vix = self.vix_df.loc[self.vix_df.index == np.datetime64(start_date.date()),\"Adj Close\"][0].item()\n",
    "            except IndexError:\n",
    "                vix = np.nan\n",
    "\n",
    "        # If report is released before market hours, take change of start date's close and release date's open\n",
    "        elif release_date < market_open:\n",
    "            start_date = release_date + datetime.timedelta(days=-1)\n",
    "            start_date = self.weekday_check(start_date)\n",
    "            end_date = release_date\n",
    "\n",
    "            price_before_release = self.get_av_data(ticker,start_date,start_date,market_open=False)\n",
    "            price_after_release = self.get_av_data(ticker,end_date,end_date,market_open=True) \n",
    "\n",
    "            index_before_release = self.get_index_price(start_date,start_date,market_open=False)\n",
    "            index_after_release = self.get_index_price(end_date,end_date,market_open=True)\n",
    "            try:\n",
    "                vix = self.vix_df.loc[self.vix_df.index == np.datetime64(start_date.date()),\"Adj Close\"][0].item()\n",
    "            except IndexError:\n",
    "                vix = np.nan\n",
    "        # If report is released during market hours, use market close\n",
    "        else:\n",
    "            start_date = release_date\n",
    "            end_date = release_date\n",
    "            price_before_release = self.get_av_data(ticker,start_date,start_date,market_open=True)\n",
    "            price_after_release = self.get_av_data(ticker,end_date,end_date,market_open=False)\n",
    "\n",
    "            index_before_release = self.get_index_price(start_date,start_date,market_open=True)\n",
    "            index_after_release = self.get_index_price(end_date,end_date,market_open=False)\n",
    "            \n",
    "            try:\n",
    "                vix = self.vix_df.loc[self.vix_df.index == np.datetime64(start_date.date()),\"Open\"][0].item()\n",
    "            except IndexError:\n",
    "                vix = np.nan\n",
    "                \n",
    "        price_pct_change = self.calculate_pct_change(price_after_release,price_before_release)\n",
    "        index_pct_change = self.calculate_pct_change(index_after_release,index_before_release)\n",
    "        normalized_change = price_pct_change - index_pct_change\n",
    "\n",
    "        return normalized_change, vix\n",
    "\n",
    "    def get_index_price(self,start_date,end_date,market_open=False):\n",
    "        try:\n",
    "            if market_open == True:\n",
    "                price = self.gspc_df.loc[(self.gspc_df.index >= np.datetime64(start_date.date())) & \n",
    "                                 (self.gspc_df.index <= np.datetime64(end_date)),\"Open\"].mean()\n",
    "            else:\n",
    "                price = self.gspc_df.loc[(self.gspc_df.index >= np.datetime64(start_date.date())) & \n",
    "                                 (self.gspc_df.index <= np.datetime64(end_date)),\"Adj Close\"].mean()\n",
    "        except IndexError:\n",
    "                price = np.nan\n",
    "        return price\n",
    "\n",
    "    def calculate_pct_change(self,end_value,start_value):\n",
    "        pct_change = (end_value - start_value) / start_value\n",
    "        pct_change = round(pct_change,4) * 100\n",
    "        return pct_change\n",
    "\n",
    "    def weekday_check(self,date):  \n",
    "        while date.isoweekday() > 5 or date.date() in self.nyse_holidays:\n",
    "            date = date + datetime.timedelta(days=-1)\n",
    "        return date"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2. Get S&P 500 Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Security</th>\n",
       "      <th>SEC filings</th>\n",
       "      <th>GICS Sector</th>\n",
       "      <th>GICS Sub Industry</th>\n",
       "      <th>Location</th>\n",
       "      <th>Date first added[3][4]</th>\n",
       "      <th>CIK</th>\n",
       "      <th>Founded</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Ticker symbol</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>MMM</th>\n",
       "      <td>3M Company</td>\n",
       "      <td>reports</td>\n",
       "      <td>Industrials</td>\n",
       "      <td>Industrials</td>\n",
       "      <td>St. Paul, Minnesota</td>\n",
       "      <td>NaN</td>\n",
       "      <td>66740</td>\n",
       "      <td>1902</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>ABT</th>\n",
       "      <td>Abbott Laboratories</td>\n",
       "      <td>reports</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>North Chicago, Illinois</td>\n",
       "      <td>1964-03-31</td>\n",
       "      <td>1800</td>\n",
       "      <td>1888</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>ABBV</th>\n",
       "      <td>AbbVie Inc.</td>\n",
       "      <td>reports</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>North Chicago, Illinois</td>\n",
       "      <td>2012-12-31</td>\n",
       "      <td>1551152</td>\n",
       "      <td>2013 (1888)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>ACN</th>\n",
       "      <td>Accenture plc</td>\n",
       "      <td>reports</td>\n",
       "      <td>Information Technology</td>\n",
       "      <td>Information Technology</td>\n",
       "      <td>Dublin, Ireland</td>\n",
       "      <td>2011-07-06</td>\n",
       "      <td>1467373</td>\n",
       "      <td>1989</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>ATVI</th>\n",
       "      <td>Activision Blizzard</td>\n",
       "      <td>reports</td>\n",
       "      <td>Information Technology</td>\n",
       "      <td>Information Technology</td>\n",
       "      <td>Santa Monica, California</td>\n",
       "      <td>2015-08-31</td>\n",
       "      <td>718877</td>\n",
       "      <td>2008</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                          Security SEC filings             GICS Sector  \\\n",
       "Ticker symbol                                                            \n",
       "MMM                     3M Company     reports             Industrials   \n",
       "ABT            Abbott Laboratories     reports             Health Care   \n",
       "ABBV                   AbbVie Inc.     reports             Health Care   \n",
       "ACN                  Accenture plc     reports  Information Technology   \n",
       "ATVI           Activision Blizzard     reports  Information Technology   \n",
       "\n",
       "                    GICS Sub Industry                  Location  \\\n",
       "Ticker symbol                                                     \n",
       "MMM                       Industrials       St. Paul, Minnesota   \n",
       "ABT                       Health Care   North Chicago, Illinois   \n",
       "ABBV                      Health Care   North Chicago, Illinois   \n",
       "ACN            Information Technology           Dublin, Ireland   \n",
       "ATVI           Information Technology  Santa Monica, California   \n",
       "\n",
       "              Date first added[3][4]      CIK      Founded  \n",
       "Ticker symbol                                               \n",
       "MMM                              NaN    66740         1902  \n",
       "ABT                       1964-03-31     1800         1888  \n",
       "ABBV                      2012-12-31  1551152  2013 (1888)  \n",
       "ACN                       2011-07-06  1467373         1989  \n",
       "ATVI                      2015-08-31   718877         2008  "
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Get table of the S&P 500 tickers, CIK, and industry from Wikipedia\n",
    "wiki_url = \"https://en.wikipedia.org/wiki/List_of_S%26P_500_companies\"\n",
    "cik_df = pd.read_html(wiki_url,header=0,index_col=0)[0]\n",
    "cik_df['GICS Sector'] = cik_df['GICS Sector'].astype(\"category\")\n",
    "cik_df['GICS Sub Industry'] = cik_df['GICS Sector'].astype(\"category\")\n",
    "cik_df.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3. Get List of 8K links from SEC Edgar"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "sec_ext = SEC_Extractor\n",
    "no_parts = 2\n",
    "part_no = 3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 218,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 37%|███▋      | 189/505 [01:30<02:31,  2.09it/s]"
     ]
    },
    {
     "ename": "UnboundLocalError",
     "evalue": "local variable 'df' referenced before assignment",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mUnboundLocalError\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-218-1528d5ae21fb>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      2\u001b[0m \u001b[0mcompany_list\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcik_df\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'CIK'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_dict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      3\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mticker\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mcik\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mtqdm\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcompany_list\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m     \u001b[0mdf_list\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msec_ext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_doc_links\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcik\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mticker\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      5\u001b[0m \u001b[0mdoc_links_df\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconcat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdf_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0maxis\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      6\u001b[0m \u001b[0mdoc_links_df\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdoc_links_df\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_index\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"ticker\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcik_df\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'GICS Sector'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcik_df\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'GICS Sub Industry'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreset_index\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrename\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcolumns\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0;34m\"index\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\"ticker\"\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-2-46a7332a54be>\u001b[0m in \u001b[0;36mget_doc_links\u001b[0;34m(cik, ticker)\u001b[0m\n\u001b[1;32m     50\u001b[0m         \u001b[0;32mexcept\u001b[0m \u001b[0mrequests\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexceptions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mConnectionError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     51\u001b[0m                 \u001b[0msleep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m.1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 52\u001b[0;31m         \u001b[0;32mreturn\u001b[0m \u001b[0mdf\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     53\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     54\u001b[0m     \u001b[0;31m# Extracts text and submission datetime from document link\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mUnboundLocalError\u001b[0m: local variable 'df' referenced before assignment"
     ]
    }
   ],
   "source": [
    "df_list = []\n",
    "company_list = cik_df['CIK'].to_dict()\n",
    "for (ticker,cik) in tqdm(company_list.items()):\n",
    "    df_list.append(sec_ext.get_doc_links(cik,ticker))\n",
    "doc_links_df = pd.concat(df_list,axis=0)\n",
    "doc_links_df = doc_links_df.set_index(\"ticker\").join(cik_df['GICS Sector']).join(cik_df['GICS Sub Industry']).reset_index().rename(columns={\"index\":\"ticker\"})\n",
    "doc_links_df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "doc_links_df.to_pickle(\"Pickles/doc_links_df.pkl\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 4. Download 8Ks & Stock Movements"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Split data into how many parts?1\n",
      "Which part is this?1\n",
      "Number of rows to process at once (10 to 50 recommended)1\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "10it [00:15,  1.56s/it]\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>ticker</th>\n",
       "      <th>cik</th>\n",
       "      <th>doc_name</th>\n",
       "      <th>txt_link</th>\n",
       "      <th>GICS Sector</th>\n",
       "      <th>GICS Sub Industry</th>\n",
       "      <th>text</th>\n",
       "      <th>release_date</th>\n",
       "      <th>items</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>A</td>\n",
       "      <td>1090872</td>\n",
       "      <td>0001564590-18-006570.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001564590-18-006570.txt : 20180322 0001564590...</td>\n",
       "      <td>2018-03-22 16:22:07</td>\n",
       "      <td>[Item 5.07]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>A</td>\n",
       "      <td>1090872</td>\n",
       "      <td>0001090872-18-000002.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001090872-18-000002.txt : 20180214 0001090872...</td>\n",
       "      <td>2018-02-14 16:27:02</td>\n",
       "      <td>[Item 2.02, Item 2.02, Item 9.01]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>A</td>\n",
       "      <td>1090872</td>\n",
       "      <td>0001564590-18-000605.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001564590-18-000605.txt : 20180118 0001564590...</td>\n",
       "      <td>2018-01-18 16:09:52</td>\n",
       "      <td>[Item 5.02, Item 9.01]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>A</td>\n",
       "      <td>1090872</td>\n",
       "      <td>0001090872-17-000015.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001090872-17-000015.txt : 20171120 0001090872...</td>\n",
       "      <td>2017-11-20 16:09:02</td>\n",
       "      <td>[Item 2.02, Item 2.02, Item 9.01]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>A</td>\n",
       "      <td>1090872</td>\n",
       "      <td>0001090872-17-000011.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001090872-17-000011.txt : 20170815 0001090872...</td>\n",
       "      <td>2017-08-15 16:12:29</td>\n",
       "      <td>[Item 2.02, Item 2.02, Item 9.01]</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "  ticker      cik                  doc_name  \\\n",
       "0      A  1090872  0001564590-18-006570.txt   \n",
       "1      A  1090872  0001090872-18-000002.txt   \n",
       "2      A  1090872  0001564590-18-000605.txt   \n",
       "3      A  1090872  0001090872-17-000015.txt   \n",
       "4      A  1090872  0001090872-17-000011.txt   \n",
       "\n",
       "                                            txt_link  GICS Sector  \\\n",
       "0  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "1  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "2  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "3  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "4  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "\n",
       "  GICS Sub Industry                                               text  \\\n",
       "0       Health Care  0001564590-18-006570.txt : 20180322 0001564590...   \n",
       "1       Health Care  0001090872-18-000002.txt : 20180214 0001090872...   \n",
       "2       Health Care  0001564590-18-000605.txt : 20180118 0001564590...   \n",
       "3       Health Care  0001090872-17-000015.txt : 20171120 0001090872...   \n",
       "4       Health Care  0001090872-17-000011.txt : 20170815 0001090872...   \n",
       "\n",
       "         release_date                              items  \n",
       "0 2018-03-22 16:22:07                        [Item 5.07]  \n",
       "1 2018-02-14 16:27:02  [Item 2.02, Item 2.02, Item 9.01]  \n",
       "2 2018-01-18 16:09:52             [Item 5.02, Item 9.01]  \n",
       "3 2017-11-20 16:09:02  [Item 2.02, Item 2.02, Item 9.01]  \n",
       "4 2017-08-15 16:12:29  [Item 2.02, Item 2.02, Item 9.01]  "
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "while part_no > no_parts:\n",
    "    no_parts = int(input(\"Split data into how many parts?\"))\n",
    "    part_no = int(input(\"Which part is this?\"))\n",
    "    \n",
    "chunksize = int(input(\"Number of rows to process at once (10 to 50 recommended)\"))\n",
    "\n",
    "#Load pickle\n",
    "crawled_df = np.array_split(pd.read_pickle(\"Pickles/doc_links_df.pkl\"),no_parts)[part_no-1][:10]\n",
    "crawled_len = len(crawled_df['txt_link'])\n",
    "chunks = math.ceil(crawled_len/chunksize)\n",
    "\n",
    "df_list = []\n",
    "for i, df in tqdm(enumerate(np.array_split(crawled_df,chunks))):\n",
    "    df['text'], df['release_date'] = zip(*df['txt_link'].apply(sec_ext.extract_text))\n",
    "    df['items'] = df['text'].map(sec_ext.extract_item_no)\n",
    "    if not os.path.isfile(\"Data/texts_example{}.csv.gzip\".format(part_no)): #If no file exists, create one with header\n",
    "        df.to_csv(\"Data/texts_example{}.csv.gzip\".format(part_no),chunksize=chunksize,compression=\"gzip\")\n",
    "    else: # else it exists so append without writing the header\n",
    "        df.to_csv(\"Data/texts_example{}.csv.gzip\".format(part_no),mode=\"a\",header=False,compression=\"gzip\",chunksize=chunksize)       \n",
    "    df_list.append(df)\n",
    "    del df\n",
    "    \n",
    "    if i % 50 == 0:\n",
    "        gc.collect()\n",
    "df = pd.concat(df_list)\n",
    "df.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load Financial Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "cik_dict = cik_df['CIK'].to_dict()\n",
    "cik_dict = {v: k for k, v in cik_dict.items()}\n",
    "df['ticker'] = df['cik'].map(cik_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "df1_gen = pd.read_csv(\"Data/texts1.csv.gzip\",compression=\"gzip\",parse_dates=['release_date'],chunksize=1000,index_col=[0])\n",
    "#df2_gen = pd.read_csv(\"Data/texts2.csv.gzip\",compression=\"gzip\",parse_dates=['release_date'],chunksize=1000)\n",
    "df1 = pd.concat([df for df in df1_gen])\n",
    "#df2 = pd.concat([df for df in df2_gen])\n",
    "#df2 = pd.read_csv(\"Data/texts2.csv\",parse_dates=['release_date'],encoding=\"utf_8\",index_col=[0])\n",
    "gc.collect()\n",
    "df = pd.concat([df1,df2],axis=0)\n",
    "gc.collect()\n",
    "df['items'] = df['items'].map(lambda x: ast.literal_eval(x))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>cik</th>\n",
       "      <th>doc_name</th>\n",
       "      <th>txt_link</th>\n",
       "      <th>GICS Sector</th>\n",
       "      <th>text</th>\n",
       "      <th>release_date</th>\n",
       "      <th>items</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>A</th>\n",
       "      <td>1090872</td>\n",
       "      <td>0001564590-18-006570.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001564590-18-006570.txt : 20180322 0001564590...</td>\n",
       "      <td>2018-03-22 16:22:07</td>\n",
       "      <td>[Item 5.07]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>A</th>\n",
       "      <td>1090872</td>\n",
       "      <td>0001090872-18-000002.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001090872-18-000002.txt : 20180214 0001090872...</td>\n",
       "      <td>2018-02-14 16:27:02</td>\n",
       "      <td>[Item 2.02, Item 2.02, Item 9.01]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>A</th>\n",
       "      <td>1090872</td>\n",
       "      <td>0001564590-18-000605.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001564590-18-000605.txt : 20180118 0001564590...</td>\n",
       "      <td>2018-01-18 16:09:52</td>\n",
       "      <td>[Item 5.02, Item 9.01]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>A</th>\n",
       "      <td>1090872</td>\n",
       "      <td>0001090872-17-000015.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001090872-17-000015.txt : 20171120 0001090872...</td>\n",
       "      <td>2017-11-20 16:09:02</td>\n",
       "      <td>[Item 2.02, Item 2.02, Item 9.01]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>A</th>\n",
       "      <td>1090872</td>\n",
       "      <td>0001090872-17-000011.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001090872-17-000011.txt : 20170815 0001090872...</td>\n",
       "      <td>2017-08-15 16:12:29</td>\n",
       "      <td>[Item 2.02, Item 2.02, Item 9.01]</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "       cik                  doc_name  \\\n",
       "A  1090872  0001564590-18-006570.txt   \n",
       "A  1090872  0001090872-18-000002.txt   \n",
       "A  1090872  0001564590-18-000605.txt   \n",
       "A  1090872  0001090872-17-000015.txt   \n",
       "A  1090872  0001090872-17-000011.txt   \n",
       "\n",
       "                                            txt_link  GICS Sector  \\\n",
       "A  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "A  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "A  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "A  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "A  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "\n",
       "                                                text        release_date  \\\n",
       "A  0001564590-18-006570.txt : 20180322 0001564590... 2018-03-22 16:22:07   \n",
       "A  0001090872-18-000002.txt : 20180214 0001090872... 2018-02-14 16:27:02   \n",
       "A  0001564590-18-000605.txt : 20180118 0001564590... 2018-01-18 16:09:52   \n",
       "A  0001090872-17-000015.txt : 20171120 0001090872... 2017-11-20 16:09:02   \n",
       "A  0001090872-17-000011.txt : 20170815 0001090872... 2017-08-15 16:12:29   \n",
       "\n",
       "                               items  \n",
       "A                        [Item 5.07]  \n",
       "A  [Item 2.02, Item 2.02, Item 9.01]  \n",
       "A             [Item 5.02, Item 9.01]  \n",
       "A  [Item 2.02, Item 2.02, Item 9.01]  \n",
       "A  [Item 2.02, Item 2.02, Item 9.01]  "
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Find rows flagged where no date was found\n",
    "df = df.loc[~(df['release_date'] >= pd.datetime(year=2018,month=5,day=1))]\n",
    "df = df.drop_duplicates(subset=\"doc_name\")\n",
    "df.index.names = ['ticker']\n",
    "df = df.reset_index()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "179"
      ]
     },
     "execution_count": 48,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>ticker</th>\n",
       "      <th>cik</th>\n",
       "      <th>doc_name</th>\n",
       "      <th>txt_link</th>\n",
       "      <th>GICS Sector</th>\n",
       "      <th>text</th>\n",
       "      <th>release_date</th>\n",
       "      <th>items</th>\n",
       "      <th>price_change</th>\n",
       "      <th>vix</th>\n",
       "      <th>rm_week</th>\n",
       "      <th>rm_month</th>\n",
       "      <th>rm_qtr</th>\n",
       "      <th>rm_year</th>\n",
       "      <th>signal</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>A</td>\n",
       "      <td>1090872</td>\n",
       "      <td>0001564590-18-006570.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001564590-18-006570.txt : 20180322 0001564590...</td>\n",
       "      <td>2018-03-22 16:22:07</td>\n",
       "      <td>[Item 5.07]</td>\n",
       "      <td>0.28</td>\n",
       "      <td>23.34</td>\n",
       "      <td>-0.41</td>\n",
       "      <td>-3.07</td>\n",
       "      <td>2.01</td>\n",
       "      <td>26.67</td>\n",
       "      <td>stay</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>A</td>\n",
       "      <td>1090872</td>\n",
       "      <td>0001090872-18-000002.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001090872-18-000002.txt : 20180214 0001090872...</td>\n",
       "      <td>2018-02-14 16:27:02</td>\n",
       "      <td>[Item 2.02, Item 2.02, Item 9.01]</td>\n",
       "      <td>7.22</td>\n",
       "      <td>19.26</td>\n",
       "      <td>1.95</td>\n",
       "      <td>-5.76</td>\n",
       "      <td>-3.94</td>\n",
       "      <td>35.67</td>\n",
       "      <td>up</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>A</td>\n",
       "      <td>1090872</td>\n",
       "      <td>0001564590-18-000605.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001564590-18-000605.txt : 20180118 0001564590...</td>\n",
       "      <td>2018-01-18 16:09:52</td>\n",
       "      <td>[Item 5.02, Item 9.01]</td>\n",
       "      <td>0.47</td>\n",
       "      <td>12.22</td>\n",
       "      <td>1.22</td>\n",
       "      <td>5.17</td>\n",
       "      <td>3.38</td>\n",
       "      <td>39.20</td>\n",
       "      <td>stay</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>A</td>\n",
       "      <td>1090872</td>\n",
       "      <td>0001090872-17-000015.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001090872-17-000015.txt : 20171120 0001090872...</td>\n",
       "      <td>2017-11-20 16:09:02</td>\n",
       "      <td>[Item 2.02, Item 2.02, Item 9.01]</td>\n",
       "      <td>0.31</td>\n",
       "      <td>10.65</td>\n",
       "      <td>2.71</td>\n",
       "      <td>1.31</td>\n",
       "      <td>10.13</td>\n",
       "      <td>40.26</td>\n",
       "      <td>stay</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>A</td>\n",
       "      <td>1090872</td>\n",
       "      <td>0001090872-17-000011.txt</td>\n",
       "      <td>http://www.sec.gov/Archives/edgar/data/1090872...</td>\n",
       "      <td>Health Care</td>\n",
       "      <td>0001090872-17-000011.txt : 20170815 0001090872...</td>\n",
       "      <td>2017-08-15 16:12:29</td>\n",
       "      <td>[Item 2.02, Item 2.02, Item 9.01]</td>\n",
       "      <td>5.19</td>\n",
       "      <td>12.04</td>\n",
       "      <td>-0.21</td>\n",
       "      <td>-3.34</td>\n",
       "      <td>4.39</td>\n",
       "      <td>21.40</td>\n",
       "      <td>up</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "  ticker      cik                  doc_name  \\\n",
       "0      A  1090872  0001564590-18-006570.txt   \n",
       "1      A  1090872  0001090872-18-000002.txt   \n",
       "2      A  1090872  0001564590-18-000605.txt   \n",
       "3      A  1090872  0001090872-17-000015.txt   \n",
       "4      A  1090872  0001090872-17-000011.txt   \n",
       "\n",
       "                                            txt_link  GICS Sector  \\\n",
       "0  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "1  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "2  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "3  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "4  http://www.sec.gov/Archives/edgar/data/1090872...  Health Care   \n",
       "\n",
       "                                                text        release_date  \\\n",
       "0  0001564590-18-006570.txt : 20180322 0001564590... 2018-03-22 16:22:07   \n",
       "1  0001090872-18-000002.txt : 20180214 0001090872... 2018-02-14 16:27:02   \n",
       "2  0001564590-18-000605.txt : 20180118 0001564590... 2018-01-18 16:09:52   \n",
       "3  0001090872-17-000015.txt : 20171120 0001090872... 2017-11-20 16:09:02   \n",
       "4  0001090872-17-000011.txt : 20170815 0001090872... 2017-08-15 16:12:29   \n",
       "\n",
       "                               items  price_change    vix  rm_week  rm_month  \\\n",
       "0                        [Item 5.07]          0.28  23.34    -0.41     -3.07   \n",
       "1  [Item 2.02, Item 2.02, Item 9.01]          7.22  19.26     1.95     -5.76   \n",
       "2             [Item 5.02, Item 9.01]          0.47  12.22     1.22      5.17   \n",
       "3  [Item 2.02, Item 2.02, Item 9.01]          0.31  10.65     2.71      1.31   \n",
       "4  [Item 2.02, Item 2.02, Item 9.01]          5.19  12.04    -0.21     -3.34   \n",
       "\n",
       "   rm_qtr  rm_year signal  \n",
       "0    2.01    26.67   stay  \n",
       "1   -3.94    35.67     up  \n",
       "2    3.38    39.20   stay  \n",
       "3   10.13    40.26   stay  \n",
       "4    4.39    21.40     up  "
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#from FinDataExtractor import FinDataExtractor\n",
    "fin_data = FinDataExtractor()\n",
    "## Load pickle of ticker, date, and doc number\n",
    "\n",
    "df['price_change'],df['vix'] = zip(*df[['ticker','release_date']].apply(fin_data.get_change,axis=1))\n",
    "df['rm_week'] = df[['ticker','release_date']].apply(fin_data.get_historical_movements,period=\"week\",axis=1)\n",
    "df['rm_month'] = df[['ticker','release_date']].apply(fin_data.get_historical_movements,period=\"month\",axis=1)\n",
    "df['rm_qtr'] = df[['ticker','release_date']].apply(fin_data.get_historical_movements,period=\"quarter\",axis=1)\n",
    "df['rm_year'] = df[['ticker','release_date']].apply(fin_data.get_historical_movements,period=\"year\",axis=1)\n",
    "df[\"signal\"] = df['price_change'].map(lambda x: \"stay\" if -1<x<1 else (\"up\" if x>1 else \"down\"))\n",
    "df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 144,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "20it [19:54, 59.72s/it]\n"
     ]
    }
   ],
   "source": [
    "chunks = 20\n",
    "for i, df_part in tqdm(enumerate(np.array_split(df,chunks))):\n",
    "    if not os.path.isfile(\"Data/texts_and_fin.csv\"): #If no file exists, create one with header\n",
    "        df_part.to_csv(\"Data/texts_and_fin.csv\")\n",
    "    else: # else it exists so append without writing the header\n",
    "        df_part.to_csv(\"Data/texts_and_fin.csv\",mode=\"a\",header=False,)       \n",
    "    del df_part\n",
    "    \n",
    "    if i % 50 == 0:\n",
    "        gc.collect()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
