import locale
from collections import Counter, defaultdict
from typing import List, Optional, Tuple

import requests
from bs4 import BeautifulSoup, Tag
from openpyxl import load_workbook
from openpyxl.styles import Color, Font, colors

RED_FONT = Font(color="FF0000")

locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
# sample_url = 'https://www.sec.gov/Archives/edgar/data/0001628871/000121390017003179/f10k2016_tempusapplied.htm'
# sample_htm_path = 'dataset/sample.htm'
# SAMPLE_METRIC = 'Total Current Assets'
METRICS = ['''Total Current Assets''', '''Total Assets''', '''Total current liabilities''',
           '''Total long term liabilities''', '''Retained Earnings''', '''Total stockholders’ equity''',
           '''Total stockholder's deficit''', '''Total liabilities and stockholders' equity''',
           '''TOTAL LIABILITIES AND STOCKHOLDERS’ DEFICIT''',
           '''Total revenue''', '''Gross profit''', '''Net income''', '''Net Loss''',
           '''Net loss from continuing operations''', '''Net (loss) income''']
EMPTY_CELL_VALUE = '\xa0'
MIN_STR_DISTANCE = 3


def parse_file(path, metrics: List[str]):
    """Parse a local HTML file and collect values for the given metrics."""
    with open(path, 'r', encoding='utf-8') as f:
        soup = BeautifulSoup(f, 'html.parser')
    return get_metrics_values(soup, metrics)


def parse_str(html_doc: str, metrics: List[str]):
    """Parse an HTML document string and collect values for the given metrics."""
    return get_metrics_values(BeautifulSoup(html_doc, 'html.parser'), metrics)


def get_metrics_values(soup, metrics):
    """Scan every <table> in *soup* and collect candidate values per metric.

    :param soup: parsed BeautifulSoup document
    :param metrics: metric labels to look for
    :return: dict mapping metric name -> list of values, reordered so the
             most frequently seen value (and its duplicates) come first
    """
    found = defaultdict(list)
    for table in soup.find_all('table'):
        rows = read_table_all_values(table)
        # table_title = get_first_tr_as_table_title(rows)
        for name in metrics:
            matched, number = filter_table(rows, name)
            if matched:
                found[matched].append(number)
    # Reorder each value list by descending frequency, keeping duplicates.
    for name in found:
        ordered = []
        for value, count in Counter(found[name]).most_common():
            ordered.extend([value] * count)
        found[name] = ordered
    return found


def read_table_all_values(table: Tag) -> List[List[str]]:
    """Extract the text of every row in *table*, dropping all-empty rows.

    A row where every cell is the non-breaking-space placeholder
    (EMPTY_CELL_VALUE, '\xa0') is treated as a spacer line and skipped.

    :param table: a parsed <table> element
    :return: list of rows, each row a list of cell text strings
    """
    rows = []
    for tr in table.find_all('tr'):
        row = [td.get_text() for td in tr.find_all('td')]
        # Keep the row only if at least one cell carries real content.
        if any(v != EMPTY_CELL_VALUE for v in row):
            rows.append(row)
    return rows


def filter_table(ll: List[List[str]], metric) -> Tuple[Optional[str], Optional[float]]:
    """Search table rows for one whose label fuzzily matches *metric*.

    The first cell of each row is compared (case-insensitively) against
    *metric* by Levenshtein distance; on a match within MIN_STR_DISTANCE,
    the first numeric cell of that row is returned.

    :param ll: table contents as rows of cell strings
    :param metric: metric label to look for
    :return: (metric, number) on success — number may be int or float —
             otherwise (None, None)
    """
    for row in ll:
        if not row:  # guard: a <tr> with no <td> yields an empty row
            continue
        label = row[0]
        if label is None or label == EMPTY_CELL_VALUE:
            continue
        if levenshtein_distance(label.lower(), metric.lower()) <= MIN_STR_DISTANCE:
            number = get_first_number(row)
            # Explicit None check so a legitimate value of 0 still counts.
            if number is not None:
                return metric, number
    return None, None


def down_content(url):
    """Download *url* and return the response body as text.

    Browser-like headers are sent because sec.gov rejects requests
    without a recognizable user agent.
    """
    browser_headers = {
        'referer': 'https://www.sec.gov/edgar/search/',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36',
    }
    response = requests.get(url, headers=browser_headers, timeout=10)
    return response.text


def try_convert_to_number(value: str):
    """Parse *value* as a locale-aware number; return None if it isn't one.

    Accounting-style negatives such as '(1,234)' are converted to -1234:
    the original code replaced only the opening parenthesis, leaving the
    trailing ')' in place so such values always failed to parse.

    :param value: cell text, e.g. '1,234', '12.5', '(300)'
    :return: int (via locale.atoi), float (via locale.atof), or None
    """
    try:
        if value.startswith('(') and value.endswith(')'):
            value = '-' + value[1:-1]
        if '.' in value:
            return locale.atof(value)
        return locale.atoi(value)
    except (AttributeError, TypeError, ValueError):
        # Not a string / not numeric text — mirror the old silent failure.
        return None


def get_first_number(l: List[str]):
    """Return the first cell in *l* that parses as a number, else None.

    Uses an explicit ``is not None`` check — the original ``if number:``
    skipped cells whose legitimate value was 0 or 0.0.

    :param l: row of cell text strings
    :return: first parsed int/float, or None if no cell is numeric
    """
    for cell in l:
        number = try_convert_to_number(cell)
        if number is not None:
            return number
    return None


def levenshtein_distance(str1, str2):
    """Compute the Levenshtein edit distance between two strings.

    Classic Wagner-Fischer dynamic programming over a full
    (len(str1)+1) x (len(str2)+1) cost matrix.

    :param str1: first string
    :param str2: second string
    :return: minimum number of single-character insertions, deletions,
             and substitutions turning str1 into str2
    """
    m, n = len(str1), len(str2)
    # Row/column 0 hold the cost of building a prefix from nothing.
    table = [[r + c for c in range(n + 1)] for r in range(m + 1)]
    for r, ch1 in enumerate(str1, start=1):
        for c, ch2 in enumerate(str2, start=1):
            cost = 0 if ch1 == ch2 else 1
            table[r][c] = min(
                table[r - 1][c] + 1,      # deletion
                table[r][c - 1] + 1,      # insertion
                table[r - 1][c - 1] + cost,  # substitution / match
            )
    return table[m][n]


def main():
    """Read 10-K URLs from an Excel workbook, extract financial metrics
    from each filing's HTML, and write the values back as new columns.

    A result cell is rendered in red when more than one candidate value
    was found for that metric, flagging it for manual review.
    """
    filename = 'dataset/10-K URL.xlsx'
    out_file = 'result/10-K URL_with_result.xlsx'
    wb = load_workbook(filename=filename)

    # Read metric names from column B of the 'common' sheet (skip header).
    ws = wb['common']
    metrics = []
    for i, r in enumerate(ws.rows):
        if i == 0:
            continue
        metrics.append(r[1].value)
    print(metrics)

    # Append one result column per metric on the 'result' sheet and
    # remember which column each metric was assigned.
    metrics_to_column_index = {}
    ws = wb['result']
    max_column = ws.max_column
    for m in metrics:
        max_column += 1
        cell = ws.cell(row=1, column=max_column)
        metrics_to_column_index[m] = max_column
        cell.value = m

    # Fetch each filing (URL in column I), extract values, fill columns.
    for i, r in enumerate(ws.rows):
        # if i > 10:  # for debug
        #     break
        url = r[8].value
        # Guard against blank cells: url is None there, and the original
        # None.startswith(...) call crashed the whole run.
        if not url or not url.startswith('http'):
            continue
        mv = parse_str(down_content(url), metrics)
        print("processing: ", i, mv)
        for k, v in mv.items():
            column_index = metrics_to_column_index[k]
            cell = ws.cell(row=i + 1, column=column_index)
            cell.value = v[0]
            # Multiple candidates found -> highlight for manual review.
            if len(v) > 1:
                cell.font = RED_FONT
    wb.save(out_file)


if __name__ == '__main__':
    # Ad-hoc single-filing smoke test, kept for reference:
    # sample_url = 'https://www.sec.gov/Archives/edgar/data/0001628871/000121390017003179/f10k2016_tempusapplied.htm'
    # content = down_content(sample_url)
    # mv = parse_str(content, METRICS)
    # print(mv)
    main()
