import collections
import json
from abc import ABC
import sys
sys.path.append(".")
# Debug aid: show the module search path so import failures are easier to
# diagnose. A plain loop, not a throwaway list comprehension, since this is
# executed purely for its side effect.
for _path in sys.path:
    print(_path)

import scrapy
from scrapy.crawler import CrawlerProcess

from payratio.items import PayratioItem
from payratio.spiders.RatioParser import RatioParser

# Limit on how many companies to crawl (handy for debugging); None = all.
FIRST_N_COMPANY = None
# Search conditions and constants for the EDGAR full-text search query.
START_DATE = "2001-01-01"  # inclusive lower bound of the filing-date window
END_DATE = "2023-02-25"  # inclusive upper bound of the filing-date window
NOT_FOUND = 'NOT_FOUND'  # placeholder for values that could not be extracted
DOCUMENT_WORD_OR_PHRASE = 'pay ratio'  # phrase searched for in filings
FILING_TYPES = 'DEF 14A'  # restrict results to proxy statements
SEP = '\t'  # column separator used in Ticker_CONM_CIK.txt


def _str_to_float(exp: str):
    value = None
    try:
        value = float(exp)
    except ValueError:
        pass
    return value


def add_0_to_complete_cik(cik: str) -> str:
    """Left-pad *cik* with zeros to the canonical 10-character SEC CIK width.

    Strings already 10 characters or longer are returned unchanged.
    str.rjust with a '0' fill is the exact equivalent of the manual
    '0' * (10 - len) concatenation it replaces.
    """
    return cik.rjust(10, '0')


def id2link(cik: str, id: str) -> str:
    """Build the EDGAR archive URL for one filing.

    *id* is a search-hit id of the form '<accession-number>:<filename>';
    the accession number loses its dashes in the URL path.
    """
    parts = id.split(':')
    folder = parts[0].replace('-', '')
    filename = parts[1]
    return f'https://www.sec.gov/Archives/edgar/data/{cik}/{folder}/{filename}'


def keep_htm_drop_pdf_when_duplicated(lines):
    """De-duplicate search hits that share the same (cik, year) key.

    When the same company/year appears more than once, prefer a non-pdf
    link: a pdf never overwrites an entry already present, while any
    other link replaces whatever was stored before. First-seen order of
    keys is preserved.

    :param lines: iterable of tuples (cik, display_name, year, link)
    :return: dict-values view of the surviving tuples
    """
    survivors = collections.OrderedDict()
    for entry in lines:
        key = entry[0] + '-' + entry[2]
        # A pdf only gets in if nothing is stored for this key yet.
        if key in survivors and entry[-1].endswith('pdf'):
            continue
        survivors[key] = entry
    return survivors.values()


class PayRatioSpider(scrapy.Spider, ABC):
    """Crawl SEC EDGAR full-text search for 'pay ratio' DEF 14A filings.

    Pipeline: read (ticker, company name, cik) rows from
    Ticker_CONM_CIK.txt, POST one full-text-search query per company,
    then follow every filing link and extract the pay-ratio figures
    with RatioParser into PayratioItem rows.
    """

    name = "payratio"

    def start_requests(self):
        """Yield one EDGAR full-text-search POST request per company."""
        with open('Ticker_CONM_CIK.txt', 'r', encoding='utf-8') as f:
            lines = f.readlines()[1:]  # skip header row
            if FIRST_N_COMPANY is not None:
                lines = lines[:FIRST_N_COMPANY]  # cut tail for debugging
            lines = [x.strip() for x in lines]

        for line in lines:
            ticker, _conm, cik = line.split(SEP)
            data = {
                'q': DOCUMENT_WORD_OR_PHRASE,
                'dateRange': 'all',
                'category': 'custom',
                # EDGAR accepts a bare CIK as the entityName value.
                'entityName': cik,
                'forms': [FILING_TYPES],
                'startdt': START_DATE,
                'enddt': END_DATE
            }
            url = 'https://efts.sec.gov/LATEST/search-index'
            yield scrapy.Request(url=url, method="POST", body=json.dumps(data),
                                 headers={'Content-Type': 'application/json'},
                                 meta={'ticker': ticker, 'cik': cik},
                                 callback=self.parse_search)

    def parse_search(self, response):
        """Parse the JSON search response and request each filing page."""
        ticker = response.meta['ticker']
        cik = response.meta['cik']

        hits_array = response.json()['hits']['hits']

        candidates = []
        for h in hits_array:
            source = h['_source']
            first_cik = source['ciks'][0]
            display_name = source['display_names'][0]
            year = source['file_date'][:4]  # year taken from the filing date
            link = id2link(first_cik, h['_id'])
            candidates.append((first_cik, display_name, year, link))

        for company_at_year in keep_htm_drop_pdf_when_duplicated(candidates):
            first_cik, display_name, year, link = company_at_year
            yield scrapy.Request(url=link, callback=self.parse_html,
                                 meta={'ticker': ticker, 'cik': cik,
                                       'name': display_name, 'year': year,
                                       'link': link})

    def parse_html(self, response):
        """Extract pay-ratio figures from one filing into a PayratioItem.

        On any extraction failure a bare item (identity fields only) is
        returned so the company/year still appears in the export.
        """
        meta = response.meta
        try:
            parser = RatioParser(response.text)

            # Strip the trailing "(...)" part from the display name.
            company_name = meta['name']
            company_name = company_name[0:company_name.find('(') - 1].strip()

            compensations = parser.get_compensation() or (None, None)
            ceo_compensation, emp_compensation = compensations

            # _str_to_float raises TypeError on None, which lands in the
            # except branch below — same fallback as any other parse failure.
            ceo_compensation = _str_to_float(ceo_compensation)
            emp_compensation = _str_to_float(emp_compensation)
            ratio = _str_to_float(parser.get_ratio())

            ratio_from_compensation = None
            correct = None
            if ceo_compensation is not None and emp_compensation is not None:
                ratio_from_compensation = ceo_compensation / emp_compensation
                if ratio:
                    # Cross-check the disclosed ratio against the one implied
                    # by the two compensation figures (tolerance of 1).
                    correct = 'correct' if abs(ratio - ratio_from_compensation) < 1 else 'wrong'

            return PayratioItem(
                ticker=meta['ticker'],
                cik=meta['cik'],
                year=meta['year'],
                link=meta['link'],
                name=company_name,
                fiscal=parser.get_fiscal_year_end(),
                ratio=ratio,
                ratio_from_compensation=ratio_from_compensation,
                ceo=ceo_compensation,
                emp=emp_compensation,
                validation=correct,
            )
        except Exception:
            # Extraction failed (missing table, unexpected markup, zero
            # employee compensation, ...): record only the filing identity.
            # Was a bare `except:` + `return` inside `finally`, which also
            # silently swallowed system-exiting exceptions.
            return PayratioItem(
                ticker=meta['ticker'],
                cik=meta['cik'],
                year=meta['year'],
                link=meta['link'],
            )


def main():
    """Run the payratio spider and export its items to an xlsx feed."""
    export_columns = ['ticker', 'cik', 'name', 'year', 'fiscal', 'ratio',
                      'ratio_from_compensation', 'ceo', 'emp', 'validation',
                      'link']
    settings = {
        "FEED_EXPORT_FIELDS": export_columns,
        "FEED_EXPORTERS": {
            'xlsx': 'scrapy_xlsx.XlsxItemExporter',
        },
        'FEEDS': {
            "sec.gov-20220502.xlsx": {"format": "xlsx", "overwrite": True},
        },
    }

    process = CrawlerProcess(settings=settings)
    process.crawl(PayRatioSpider)
    # Blocks here until the crawl has finished.
    process.start()


# Entry point: only start crawling when executed as a script, not on import.
if __name__ == '__main__':
    main()
