#!/usr/bin/env python
# -*- coding: utf-8 -*-
import collections
import math
import os
import re
import ssl
import sys
from typing import List, Tuple
import traceback

import fitz  # pip install PyMuPDF
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm

# Disable HTTPS certificate verification process-wide (work around
# broken/self-signed certificates on the download path).
ssl._create_default_https_context = ssl._create_unverified_context

# Ratio-extraction regexes.
# NOTE(review): `pd` is easy to confuse with the common pandas alias.
pd = re.compile(r'([\d.]+)')  # bare number, e.g. '323' or '45.5'
p1 = re.compile(r'([\d.,]+\s*[TtOo:]+\s*1)[^\d]')  # '323:1', '323 to 1'
p2 = re.compile(r'[\d.,]+\s*to one')  # '323 to one'
p3 = re.compile(r'[\d.,]+\s*times the median')  # '45 times the median'

DATA_DIR = 'data'  # failed documents are saved under this directory for inspection
COMPANY_FILE = 'CIK-5.12.2021.txt'  # input: ticker/company/cik rows, tab-separated
TARGET_CSV = 'pay_ratio_target.csv'  # output of step 1: links to filings
RESULT_CSV = 'pay_ratio_result.csv'  # output of step 2: links plus parsed ratios
DOCUMENT_WORD_OR_PHRASE = 'pay ratio'  # EDGAR full-text search query
FAILED_TO_GET_RATIO = 'Fail_to_get_ratio'  # sentinel written when parsing fails
SEP = '\t'
START_DATE = '2017-01-01'
END_DATE = '2021-06-04'
ENABLE_PROXY = False  # whether to route requests through a proxy server
FIRST_N_COMPANY = None  # None stands for all


def _get_proxies():
    """
    获取代理服务器
    :return:
    """
    # return {
    #     'http': 'http://100.100.154.250:3128',
    #     'https': 'http://100.100.154.250:3128',
    # } if ENABLE_PROXY else None
    return {
        'http': 'http://127.0.0.1:1081',
        'https': 'http://127.0.0.1:1081',
    } if ENABLE_PROXY else None


def __search_one_company_links(cik_company: List[str]) -> list:
    """Query SEC EDGAR full-text search for one company's DEF 14A filings.

    :param cik_company: [ticker, company_name, cik] row from COMPANY_FILE.
    :return: list of (cik, display_name, year, link) tuples, de-duplicated
        by cik-year with htm preferred over pdf; empty list on any failure.
    """

    def __id2link(cik: str, doc_id: str) -> str:
        # _id looks like '0001234567-21-000123:proxy.htm'; the archive URL
        # uses the accession number without dashes plus the file name.
        url = 'https://www.sec.gov/Archives/edgar/data/{cik}/{p1}/{p2}'
        ss = doc_id.split(':')
        return url.format(cik=cik, p1=ss[0].replace('-', ''), p2=ss[1])

    def __get_year(date: str):
        # file_date is ISO formatted ('YYYY-MM-DD'); keep the year part.
        return date[:4]

    _, company, cik = cik_company
    url = 'https://efts.sec.gov/LATEST/search-index'
    data = {
        'q': DOCUMENT_WORD_OR_PHRASE,
        'dateRange': 'custom',
        'entityName': company,
        'category': 'custom',
        'forms': ['DEF 14A'],
        'startdt': START_DATE,
        'enddt': END_DATE
    }
    # BUG FIX: initialized up front — the original assigned this inside the
    # try block and then did `return resultant_list` in `finally`, which
    # raised UnboundLocalError whenever requests.post itself failed.
    resultant_list = []
    try:
        resp = requests.post(url, json=data, proxies=_get_proxies(), timeout=10)
        hits = resp.json()['hits']['hits']  # array of search hits
        for h in hits:
            source = h['_source']
            first_cik = source['ciks'][0]
            if cik not in first_cik:
                continue  # hit belongs to a different registrant

            display_name = source['display_names'][0]
            year = __get_year(source['file_date'])  # year taken from file_date
            link = __id2link(first_cik, h['_id'])
            resultant_list.append((first_cik, display_name, year, link))
        resultant_list = keep_htm_drop_pdf_when_duplicated(resultant_list)
    except Exception:
        # Best effort: network / JSON-shape errors for one company yield an
        # empty result instead of aborting the whole batch.
        pass
    return resultant_list


def __write_csv(result_list, out):
    """
    写入csv文件
    :param result_list:
    :param out:
    :return:
    """
    headers = ['cik', 'name', 'year', 'link']
    result_list = [headers] + result_list  # 所有行写成list
    # x代表一行，是一个list，x内元素按Tab连接起来成为str。result_list is a list of string now
    lines = [SEP.join(x) + '\n' for x in result_list]
    with open(out, 'w') as f:
        f.writelines(lines)


def search_and_store_all_links():
    """Step 1 of the pipeline.

    1. read the local file containing the target companies
    2. search each one on sec.gov (EDGAR full-text search)
    3. write cik, name, year, url rows to TARGET_CSV
    """

    def __read_ciks_companies(path=COMPANY_FILE):
        # Split each line once (the original split every line twice), keep
        # only well-formed 3-field rows, and drop the header row.
        with open(path, encoding='utf-8') as f:
            rows = [line.strip().split(SEP) for line in f]
        return [row for row in rows if len(row) == 3][1:]

    ticker_comp_cik = __read_ciks_companies()
    if FIRST_N_COMPANY is not None:
        ticker_comp_cik = ticker_comp_cik[:FIRST_N_COMPANY]  # debugging aid
    result_list = []
    for comp in tqdm(ticker_comp_cik):
        result_list += __search_one_company_links(comp)
    __write_csv(result_list, TARGET_CSV)


def _get_digit_ratio(ratio_param: str):
    ratio = ratio_param
    ratio = ratio.replace(',', '')
    ratio = ratio.replace('times the median', '')
    ratio = ratio.replace('to one', '')
    m = pd.search(ratio)
    if m:
        return m.group(1)
    print("Fail to parse ratio from: " + ratio_param)
    return FAILED_TO_GET_RATIO


def _parse_get_ratio(content: str):
    """Extract the CEO pay ratio from one filing document.

    The HTML is flattened to text, all three ratio phrasings are collected,
    and when several candidates exist the one closest to the keyword
    ' ratio ' in the document wins.

    :param content: raw document content (HTML or plain text).
    :return: the numeric ratio as a string, or FAILED_TO_GET_RATIO.
    """
    text = BeautifulSoup(content, 'lxml').get_text()

    # All three phrasings: 'N:1' / 'N to 1' (p1), 'N to one' (p2),
    # 'N times the median' (p3).
    candidates = p1.findall(text) + p2.findall(text) + p3.findall(text)

    if not candidates:
        return FAILED_TO_GET_RATIO
    if len(candidates) == 1:
        # The original parsed this candidate through _get_digit_ratio twice
        # (idempotent for digits); once is enough.
        return _get_digit_ratio(candidates[0])

    # Several matches: prefer the candidate nearest to ' ratio '. Using <=
    # keeps the *last* candidate on ties, matching the original rule.
    best_candidate = None
    best_distance = sys.maxsize
    for candidate in candidates:
        distance = _find_distance_to_ratio(text, candidate)
        if distance <= best_distance:
            best_candidate = candidate
            best_distance = distance
    return _get_digit_ratio(best_candidate)


def _find_distance_to_ratio(content: str, candidate: str):
    content = content.lower()
    candidate = candidate.lower()
    target_keyword = ' ratio '
    candidate_position = content.find(candidate)
    if candidate_position == -1:
        raise Exception("正则匹配结果无法在原文找到")
    prev_index = content.rfind(target_keyword, 0, candidate_position)
    next_index = content.find(target_keyword, candidate_position, -1)
    if prev_index == -1 and next_index == -1:
        return sys.maxsize

    nd = math.fabs(next_index - candidate_position)
    if prev_index == -1:
        return nd

    pd = math.fabs(prev_index - candidate_position)
    if next_index == -1:
        return pd

    min_distance = min(pd, nd)
    return min_distance


def _get_text_from_response(t: str, response) -> str:
    """Extract plain text from an HTTP response that holds either a PDF
    or an HTML/text document.

    :param t: lowercase 3-character URL suffix; 'pdf' triggers PDF parsing,
        anything else returns the decoded response body as-is.
    :param response: requests response object.
    :return: the extracted text.
    """

    def _extract_pdf_text(filepath: str) -> str:
        # NOTE(review): page.getText() is the legacy PyMuPDF spelling
        # (newer releases call it page.get_text()); kept as-is because
        # this script targets the old API.
        with fitz.open(filepath) as doc:
            return "".join(page.getText().strip() for page in doc)

    if t != 'pdf':
        return response.text

    # PyMuPDF needs a file on disk; write the body to a scratch file.
    tmp_path = 'tmpfile'
    with open(tmp_path, 'wb') as f:
        f.write(response.content)
    try:
        return _extract_pdf_text(tmp_path)
    finally:
        # BUG FIX: the scratch file used to be left behind after every
        # PDF download; remove it once the text has been extracted.
        os.remove(tmp_path)


def _get_ratio(url: str, full_file_path: str):
    """Download one filing and parse the pay ratio out of it.

    :param url: document URL (.htm or .pdf).
    :param full_file_path: path prefix used to save the raw document when
        parsing fails, as a debugging aid.
    :return: the ratio string, or FAILED_TO_GET_RATIO on any failure.
    """
    headers = {
        'referer': 'https://www.sec.gov/edgar/search/',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36'
    }
    try:
        response = requests.get(url, headers=headers, timeout=10)
        doc_type = url.lower()[-3:]  # 'pdf' vs anything else
        text = _get_text_from_response(doc_type, response)
        ratio = _parse_get_ratio(text)
        if ratio == FAILED_TO_GET_RATIO:
            # BUG FIX: the original guarded with `if ratio:` — but the
            # failure sentinel is a non-empty (truthy) string, so this
            # debug save was unreachable and the function fell through.
            # Save the document for manual inspection, then still return
            # the sentinel so the caller gets a string in every case.
            with open(full_file_path + url[-3:], 'wb') as out_file:  # for debug
                out_file.write(response.content)
        return ratio
    except Exception:
        # Network or parse errors are expected for some filings; callers
        # treat the sentinel as "no result".
        return FAILED_TO_GET_RATIO


def run_all():
    """Step 2 of the pipeline: download every filing listed in TARGET_CSV,
    extract the pay ratio, and write cik/name/year/link[/ratio] rows to
    RESULT_CSV.

    Resumable: ratios already recorded in RESULT_CSV from a previous run
    are carried over, and only rows whose last field is still a URL are
    processed again.
    """
    # read target
    path = TARGET_CSV
    with open(path, 'r') as f:
        lines = f.readlines()
    lines = lines[1:]  # skip header
    lines = lines[:FIRST_N_COMPANY] if FIRST_N_COMPANY is not None else lines  # cut tail for debugging
    # Keyed by 'cik-year'; OrderedDict keeps the input row order for output.
    d = collections.OrderedDict()
    for line in lines:
        line = line.strip()
        ss = line.split(SEP)
        d[ss[0] + '-' + ss[2]] = line

    # read result since last run, for continuing and skip what have done
    if os.path.isfile(RESULT_CSV):
        with open(RESULT_CSV, 'r', encoding='utf-8') as f:
            result_lines = f.readlines()
        result_lines = [x.strip() for x in result_lines]
        for rl in result_lines:
            ss = rl.split(SEP)
            ratio = ss[-1]
            # Rows that failed last time (sentinel) or never ran (last
            # field still a URL) are left for re-processing below.
            if ratio == FAILED_TO_GET_RATIO or ratio.startswith('http'):
                continue
            d[ss[0] + '-' + ss[2]] += SEP + ratio + '\n'  # load result from last run
    # A row still ending in a URL has no ratio yet — only those are fetched.
    lines = [x for x in d.values() if x.strip().split(SEP)[-1].startswith('http')]  # skip when with result
    print("ready to parse {} documents.".format(len(lines)))
    for line in tqdm(lines):
        ss = line.strip().split(SEP)
        url = ss[-1]
        cik_year = ss[0] + '-' + ss[2]
        path = os.path.join(DATA_DIR, cik_year)
        ratio = _get_ratio(url, path)
        # Append the parsed ratio (or the failure sentinel) to the row.
        d[cik_year] += SEP + ratio + '\n'

    lines = [x for x in d.values() if x.strip().endswith(FAILED_TO_GET_RATIO)]  # skip when with result
    print("Without result {}".format(len(lines)))
    with open(RESULT_CSV, 'w', encoding='utf-8') as f:
        f.writelines(d.values())


def _debug(htm_file):
    """Run ratio extraction on a locally saved filing (manual debug aid).

    :param htm_file: path to a saved .htm document.
    """
    # BUG FIX: the original joined readlines() with ' ', inserting a stray
    # space before every newline, and then pointlessly re-encoded the text
    # to bytes; read the file once and hand the str straight to the parser.
    with open(htm_file, 'r', encoding='utf-8') as f:
        content = f.read()
    _parse_get_ratio(content)


def keep_htm_drop_pdf_when_duplicated(lines: List[Tuple]) -> list:
    """De-duplicate (cik, name, year, link) rows by cik-year, preferring
    an htm link over a pdf link for the same key.

    Insertion order is preserved. When a duplicate key arrives, a pdf link
    never replaces the existing entry, while any non-pdf link overwrites it.

    :param lines: iterable of (cik, name, year, link) tuples.
    :return: de-duplicated rows as a real list (the original returned a
        dict view; a list is friendlier to callers and matches the intent
        of the annotation).
    """
    deduped = collections.OrderedDict()
    for row in lines:
        key = row[0] + '-' + row[2]  # cik-year identifies one filing season
        if key in deduped and row[-1].endswith('pdf'):
            continue  # keep the existing (non-worse) entry over the pdf
        deduped[key] = row
    return list(deduped.values())


def test_re():
    """Smoke-test the ratio regex pipeline against one known sentence."""
    sample = 'The result of this analysis is a ratio of 323:1.'
    print(_parse_get_ratio(sample))


if __name__ == '__main__':
    # Step 1 (run once): search EDGAR and cache filing links to TARGET_CSV.
    # search_and_store_all_links()
    # Step 2 (resumable): download each link and extract the pay ratio.
    run_all()
    # test_re()
    # _debug('data/0000203527_get target failed2019.htm')
