import json
import scrapy
import csv
import logging
from scrapy_10k.items import Scrapy10KItem, SPACItem

# Process-wide logging configuration: timestamped, name- and level-tagged lines.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)  # module-level logger

# Search parameters sent with every EDGAR full-text search request.
START_DATE = "2001-01-01"  # earliest filing date included in the search
END_DATE = "2021-09-18"  # latest filing date included in the search
NOT_FOUND = 'NOT_FOUND'  # placeholder value yielded when a CIK produces no hits
DOCUMENT_WORD_OR_PHRASE = ''  # empty query string: no full-text term filter
FILING_TYPES = '10-K'  # only 10-K filings are requested


def _id2link(cik: str, id: str) -> str:
    """
    helper function, use cik and id to make up hyper link
    :param cik:
    :param id: can be retrieved by searching
    :return: link to htm
    """
    url = 'https://www.sec.gov/Archives/edgar/data/{cik}/{p1}/{p2}'  # link template
    ss = id.split(':')
    p1 = ss[0].replace('-', '')  # remove '-'
    p2 = ss[1]
    return url.format(cik=cik, p1=p1, p2=p2)  # put them to the template


class Spider10k(scrapy.Spider):
    """
    Spider that queries the SEC EDGAR full-text search endpoint for 10-K
    filings of every company (CIK) listed in ``original.csv`` and yields one
    ``SPACItem`` per search hit, or a single NOT_FOUND placeholder item when
    the search comes back empty.
    """
    name = "Spider 10k with spac data"  # spider name (used by `scrapy crawl`)

    def start_requests(self):  # spider entry point
        """Read original.csv and POST one EDGAR search request per CIK row."""
        # Load the CSV up front so the number of CIKs can be logged before
        # any requests are scheduled.
        rows = []
        with open("original.csv", encoding='utf-8') as csvfile:  # source data
            reader = csv.DictReader(csvfile)  # one dict per line, keyed by header
            for row in reader:
                if row.get('CIK'):  # skip rows with a blank or missing CIK
                    rows.append(row)

        url = "https://efts.sec.gov/LATEST/search-index"
        logger.info("#cik ready to scrapy: %s", len(rows))  # lazy %-formatting
        for row in rows:
            data = {
                'q': DOCUMENT_WORD_OR_PHRASE,
                'dateRange': 'all',
                'category': 'custom',
                # 'entityName' is NOT a typo: the endpoint expects the CIK
                # under this key when searching by entity.
                'entityName': row['CIK'],
                'forms': [FILING_TYPES],
                'startdt': START_DATE,
                'enddt': END_DATE
            }
            # POST simulates the search form submission; the originating CSV
            # row travels along in meta so parse() can copy its columns.
            yield scrapy.Request(url=url, method="POST", body=json.dumps(data),
                                 headers={'Content-Type': 'application/json'}, meta={'row': row},
                                 callback=self.parse)

    def parse(self, response):
        """Turn one EDGAR search response into SPACItem objects."""
        # Pull the originating CSV row back out of the request meta.
        row = response.meta['row']
        spac = str(row['SPAC'])
        company_name = str(row['CompanyName'])
        full_name = str(row['Full Name'])
        ticker = str(row['Ticker'])
        cik = str(row['CIK'])
        # Some CSV CIKs are zero-padded and some are not; normalise them.
        # NOTE(review): this only prepends '000' when that prefix is absent,
        # it does not pad to a fixed width -- confirm this matches EDGAR.
        cik = cik if cik.startswith('000') else '000' + cik
        data_source = str(row['Data Source'])

        def make_item(file_date, reporting_for, link):
            # The identifying columns are identical for every item built from
            # this response; only the per-hit fields vary.
            return SPACItem(
                SPAC=spac,
                CompanyName=company_name,
                FullName=full_name,
                Ticker=ticker,
                CIK=cik,
                DataSource=data_source,
                FileDate=file_date,
                ReportingFor=reporting_for,
                link=link
            )

        try:
            hits = json.loads(response.text)['hits']['hits']  # search result lines
            if not hits:
                # Empty result set: emit a placeholder item so the CIK still
                # shows up in the output.
                logger.info("invalid cik:%s", cik)
                yield make_item(NOT_FOUND, NOT_FOUND, NOT_FOUND)
                return

            # Each element of hits represents one search result line.
            for h in hits:
                source = h['_source']
                yield make_item(source['file_date'], source['period_ending'],
                                _id2link(cik, h['_id']))
        except Exception:
            # Was a bare `except:` that swallowed everything at INFO level;
            # keep the best-effort semantics but record the full traceback.
            logger.exception("Fail to search cik(%s)", cik)
