#!/usr/bin/env python 
# -*- coding: utf-8 -*-
import os
import ssl
from typing import List

import requests
from bs4 import BeautifulSoup
from lxml import etree
from tqdm import tqdm

ssl._create_default_https_context = ssl._create_unverified_context

DATA_DIR = 'data'
START_DATE = '2016-01-01'
END_DATE = '2021-05-11'
ENABLE_PROXY = False  # 是否启用代理服务器
FIRST_N_COMPANY = None  # None stands for all


def get_proxies():
    """
    Build the ``proxies`` mapping for ``requests`` calls.

    :return: a dict routing both http and https traffic through the local
        proxy when ENABLE_PROXY is True, otherwise None (direct connection).
    """
    if not ENABLE_PROXY:
        return None
    # single proxy endpoint serves both schemes
    proxy = 'http://localhost:8080'
    return {'http': proxy, 'https': proxy}


def __search_one_company_links(cik_company: List[str]) -> list:
    """
    Query SEC EDGAR full-text search for one company's 10-K filings.

    :param cik_company: two-element sequence ``[cik, company_name]``
    :return: list of tuples ``(cik, display_name, year, file_date,
        reporting_for, link)``, one per matching filing whose first CIK
        contains the requested CIK
    """
    def __id2link(cik: str, hit_id: str) -> str:
        # hit_id looks like '<accession-number>:<file-name>'; the accession
        # number loses its dashes in the archive path
        url_tpl = 'https://www.sec.gov/Archives/edgar/data/{cik}/{p1}/{p2}'
        accession, file_name = hit_id.split(':')
        return url_tpl.format(cik=cik, p1=accession.replace('-', ''), p2=file_name)

    def __get_year(date: str) -> str:
        # dates are ISO formatted (YYYY-MM-DD): year is the first 4 chars
        return date[:4]

    cik, company = cik_company
    url = 'https://efts.sec.gov/LATEST/search-index'
    data = {
        'q': 'Item 1A.',
        'category': 'custom',
        'entityName': company,
        'forms': ["10-K"],
        'startdt': START_DATE,
        'enddt': END_DATE
    }
    resp = requests.post(url, json=data, proxies=get_proxies())
    # fail loudly on HTTP errors instead of a cryptic KeyError on the JSON below
    resp.raise_for_status()
    hits = resp.json()['hits']['hits']  # array
    resultant_list = []
    for h in hits:
        first_cik = h['_source']['ciks'][0]
        # a search hit may belong to a different entity; keep only our CIK
        if cik not in first_cik:
            continue

        display_name = h['_source']['display_names'][0]
        file_date = h['_source']['file_date']
        reporting_for = h['_source']['period_ending']
        year = __get_year(reporting_for)  # year derived from "reporting for" date
        _id = h['_id']
        link = __id2link(first_cik, _id)
        one_company_year = (first_cik, display_name, year, file_date, reporting_for, link)
        resultant_list.append(one_company_year)
    return resultant_list


def __write_csv(result_list, out):
    """
    Write the search results to a tab-separated file with a header row.

    :param result_list: iterable of row tuples/lists of string fields
    :param out: output file path
    :return: None
    """
    headers = ['cik', 'display_name', 'year', 'file_date', 'reporting_for', 'link']
    rows = [headers] + list(result_list)
    # each row becomes one line: fields joined by Tab, newline-terminated
    lines = ['\t'.join(x) + '\n' for x in rows]
    # explicit utf-8: display names may contain non-ASCII characters, and the
    # merge step reads this file back with encoding='utf-8'
    with open(out, 'w', encoding='utf-8') as f:
        f.writelines(lines)


def search_and_store_all_links():
    """
    Pipeline step 1:
      1. load (cik, company) pairs from the local companies file
      2. query sec.gov full-text search for each company
      3. dump cik/year/url rows into a csv file
    """

    def __load_pairs(path='companies.txt'):
        # first line is a header; each remaining line is 'cik<TAB>company'
        with open(path, encoding='utf-8') as fh:
            rows = [line.strip().split('\t') for line in fh.readlines()]
        return rows[1:]

    pairs = __load_pairs()
    # surface malformed rows (anything that is not exactly cik + company)
    for row in pairs:
        if len(row) != 2:
            print(row)
    if FIRST_N_COMPANY is not None:
        pairs = pairs[:FIRST_N_COMPANY]

    rows = []
    for pair in tqdm(pairs):
        rows += __search_one_company_links(pair)

    __write_csv(rows, 'out_sec10k.csv')


def parse_get_item1a(content) -> str:
    """
    Extract the 'Item 1A.' (risk factors) section from a 10-K filing.

    :param content: raw HTML of the filing (str or bytes, e.g.
        ``response.content`` from requests — lxml accepts both)
    :return: text between the last 'Item 1A.' heading and the following
        'Item 1B.' heading, or '' when the section cannot be located
    """
    html = etree.HTML(content)
    # flatten all text nodes; normalize non-breaking spaces to plain spaces
    text = ''.join(html.itertext()).replace('\xa0', ' ')
    lower = text.lower()
    # the table of contents also contains these headings, so search from the
    # end of the document: rfind hits the real section, not the TOC entry
    pos_1b = lower.rfind('item 1b.')
    pos_1a = lower.rfind('item 1a.', 0, pos_1b)
    if pos_1b == -1 and pos_1a == -1:
        # fall back to headings written without the trailing dot
        pos_1b = lower.rfind('item 1b')
        pos_1a = lower.rfind('item 1a', 0, pos_1b)
    if pos_1a == -1 or pos_1b == -1 or pos_1b <= pos_1a:
        # boundaries missing or inverted: nothing reliable to extract
        # (avoids the original negative-index slicing artifacts)
        return ''
    return text[pos_1a:pos_1b]


def down_file(url, file_name):
    """
    Download one filing and store only its extracted 'Item 1A' text.

    Skips the download when file_name already exists (makes batch runs
    resumable).

    :param url: link to the filing document on sec.gov
    :param file_name: output .txt path for the extracted section
    :raises requests.HTTPError: on non-2xx responses
    """
    if os.path.isfile(file_name):
        return
    # browser-like headers: sec.gov appears to reject the default
    # requests User-Agent — TODO confirm against current SEC policy
    headers = {
        'referer': 'https://www.sec.gov/edgar/search/',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36'
    }
    response = requests.get(url, headers=headers, timeout=10)
    # fail loudly on HTTP errors (e.g. 403 rate limiting) instead of
    # silently parsing an error page and writing it to disk
    response.raise_for_status()

    txt = parse_get_item1a(response.content)
    with open(file_name, 'w', encoding='utf-8') as f:
        f.write(txt)


def down_docs():
    """
    Pipeline step 2: download every filing listed in out_sec10k.csv and
    extract its Item 1A section into DATA_DIR/<cik>_<year>.txt.
    """
    csv_path = 'out_sec10k.csv'
    with open(csv_path, 'r') as f:
        lines = f.readlines()
    lines = lines[1:]  # skip header
    lines = lines[:FIRST_N_COMPANY]  # FIRST_N_COMPANY is None -> keep all
    os.makedirs(DATA_DIR, exist_ok=True)  # ensure the output dir exists
    for line in tqdm(lines):
        ss = line.strip().split('\t')
        url = ss[-1]
        cik = ss[0]
        year = ss[2]
        file_name = '{}_{}.txt'.format(cik, year)
        out_path = os.path.join(DATA_DIR, file_name)
        try:
            down_file(url, out_path)
        except Exception as e:
            # one bad filing must not stop the batch, but never use a bare
            # except (it would also swallow KeyboardInterrupt)
            print("Fail to down: " + out_path + " ({})".format(e))


def debug(htm_file):
    """Run the Item 1A parser against a locally saved .htm filing file."""
    with open(htm_file, 'r') as fh:
        raw = ' '.join(fh.readlines())
    # the parser is fed utf-8 bytes, matching what requests delivers
    parse_get_item1a(raw.encode('utf-8'))


def merge_into_one_csv():
    """
    Pipeline step 3: join the metadata rows in out_sec10k.csv with the
    extracted Item 1A texts under DATA_DIR and write final_sec10k.csv —
    one tab-separated line per filing, with the text as the last column.
    """
    base_csv = 'out_sec10k.csv'
    with open(base_csv, 'r', encoding='utf-8') as f:
        lines = [x.strip() for x in f.readlines()]
    # index metadata rows by '<cik>_<year>', matching the txt file names
    d = {}
    for line in lines[1:]:  # skip header
        ss = line.split('\t')
        cik, year = ss[0], ss[2]
        d[cik + '_' + year] = line
    print("read base csv done.")

    # was a hardcoded 'data' literal; use DATA_DIR to stay in sync with down_docs
    for name in tqdm(os.listdir(DATA_DIR)):
        if not name.endswith('.txt'):
            continue
        with open(os.path.join(DATA_DIR, name), 'r', encoding='utf-8') as f:
            # flatten the text onto one line; tabs would break the csv columns
            content = ' '.join([x.strip().replace('\t', ' ') for x in f.readlines()])
        key = name[:name.rfind('.')]  # strip extension -> '<cik>_<year>'
        if key not in d:
            # orphan file with no metadata row: warn instead of a KeyError crash
            print('no base-csv row for ' + key + ', skipping')
            continue
        d[key] += '\t' + content
    print('merge done. starting to write final file')

    out_csv = 'final_sec10k.csv'
    with open(out_csv, 'w', encoding='utf-8') as f:
        for line in tqdm(d.values()):
            f.write(line + '\n')

if __name__ == '__main__':
    # Run the pipeline step by step, in order: search_and_store_all_links,
    # then down_docs, then merge_into_one_csv; debug is optional.
    # search_and_store_all_links()
    # down_docs()
    # debug('data/0000203527_2019.htm')
    merge_into_one_csv()