import csv
import pickle
import re
from collections import OrderedDict


def _filter(s):
    d = {"[┬┴─┼├┤└┘┌┐]+": ""}
    for i in d:
        s = re.sub(i, d[i], s)
    return s.rstrip('\r\n').rstrip('｜').rstrip('\r\n')


def buildRow(l):
    """Join cells into one table row using the fullwidth separator '｜'.

    Rows are capped: when there are more than 6 cells, the last (oldest)
    cell is dropped. Unlike the previous version, the caller's list is
    NOT mutated (`l.pop()` silently altered the argument in place).
    """
    cells = l[:-1] if len(l) > 6 else l
    return '｜'.join(cells)


def insertAtFirstCol(o, n, n_tab_col=1):
    """Merge column `n_tab_col` of new table `n` into old table `o`.

    Both tables are '\\n'-separated rows of '｜'-separated cells; rows are
    keyed by their first (header) cell.

    - Header in both tables: the new value is inserted right after the
      header cell of the old row.
    - Header only in the old table: a blank cell is inserted there instead,
      shifting the old data right to stay date-aligned.
    - Header only in the new table: the row is appended with a trailing
      blank cell, shifting left to stay date-aligned.

    Each output row is capped by dropping its last cell when it exceeds
    6 cells (header + data columns). Returns the merged table string.
    """
    BLANK = '          '  # 10-space placeholder cell
    merged = OrderedDict()
    for row in o.split('\n'):
        cells = row.split('｜')
        # Assume the new table may lack this row; default to shift-right.
        merged[cells[0].strip()] = [cells, 'addBlankToLeft']
    for row in n.split('\n'):
        cells = row.split('｜')
        key = cells[0].strip()
        if key in merged:
            # Matching rows: remember the new row's cells for the merge pass.
            merged[key][1] = cells
        else:
            # Row absent from the old table; default to shift-left.
            merged[key] = [cells, 'addBlankToRight']
    out_rows = []
    for cells, action in merged.values():
        if isinstance(action, list):
            # `action` holds the new row: prepend its selected column.
            cells.insert(1, action[n_tab_col])
        elif action == 'addBlankToLeft':
            cells.insert(1, BLANK)
        else:
            cells.append(BLANK)
        if len(cells) > 6:
            cells = cells[:-1]  # drop the oldest column to cap the row
        out_rows.append('｜'.join(cells))
    return '\n'.join(out_rows)


def removeFirstCol(s):
    """Remove the first data cell (the cell right after the row header)
    from every line of a '｜'-separated table string."""
    stripped = [re.sub('｜[^｜]+', '', line, count=1) for line in s.split('\n')]
    return '\n'.join(stripped)


def _load_map(pkl_path, label):
    # Load a pickled {code: (latest_date, table)} map; a missing file yields
    # a fresh map flagged for dumping at the end of the run.
    # Fixes a resource leak: the original opened the pickle without closing it.
    try:
        with open(pkl_path, 'rb') as f:
            return pickle.load(f), False
    except FileNotFoundError:
        print('no {} map'.format(label))
        return dict(), True


def _extract_table(section, patterns):
    # Concatenate the first match of each known row pattern into a
    # multi-line table string (one matched row per line, cleaned by _filter).
    table = ''
    for reg in patterns:
        m = re.search(reg, section)
        if m:
            table += '\n'.join(_filter(x) for x in m.groups()) + '\n'
    return table


def _merge_into_map(table_map, code, latest_date, table):
    # Reconcile a freshly parsed table with the cached one.
    # Returns (table_to_publish, changed). Backward data modifications
    # (same latest date, different body) are deliberately ignored.
    if code not in table_map:
        table_map[code] = (latest_date, table)
        return table, True
    if latest_date != table_map[code][0]:
        # New reporting date: prepend the newest column to the cached table.
        table = insertAtFirstCol(table_map[code][1], table)
        table_map[code] = (latest_date, table)
        return table, True
    return table_map[code][1], False


def _dump_map(table_map, base_path):
    # Persist the map to <base_path>.pkl and keep a dated backup copy.
    from shutil import copyfile
    from datetime import datetime
    pkl_path = base_path + '.pkl'
    with open(pkl_path, 'wb') as f:
        pickle.dump(table_map, f, protocol=pickle.HIGHEST_PROTOCOL)
    copyfile(pkl_path, base_path + '{}.pkl'.format(datetime.today().strftime('%Y-%m-%d')))


def run(path, g_path="/home/ubuntu/f10_serverside/"):
    """Parse the f12 '主要财务指标' section for every row of the CSV at `path`.

    Returns {code: [yearly_table]} when only one table is present, or
    {code: [yearly_table, quarterly_table]} when two are. Historical
    tables are cached in pickles under `g_path`/f12 and re-dumped (with a
    dated backup) whenever new data was merged in.
    """
    quarterly_map, quarterly_need_dump = _load_map(
        g_path + "f12/f12_quarterlyTable.pkl", 'Q')
    yearly_map, yearly_need_dump = _load_map(
        g_path + "f12/f12_yearlyTable.pkl", 'Y')
    with open(path, newline='', encoding="utf-8-sig", errors="ignore") as f:
        reader = csv.DictReader(f)
        field_w_code = {}
        # One compiled pattern per known table row (header, audit opinion,
        # and the individual financial metrics).
        regex = [re.compile(r'(财务指标.*?)\n'),
                 re.compile(r'(审计意见.*?)｜?\n｜?(.*?)｜?\n'),
                 re.compile(r'(净利润\(万元\).*?)\n'),
                 re.compile(r'(净利润增长率\(%\).*?)\n'),
                 re.compile(r'(营业总收入\(万元\).*?)\n'),
                 re.compile(r'(营业总收入增长率\(%\).*?)\n'),
                 re.compile(r'(加权净资产收益率\(%\).*?)\n'),
                 re.compile(r'(资产负债比率\(%\).*?)\n'),
                 re.compile(r'(净利润现金含量\(%\).*?)\n'),
                 ]
        # The latest date sits in the first data column of the header row.
        date_re = re.compile(r'财务指标[\s]*｜[\s]*([\d-]{10})[\s]*｜')
        for row in reader:
            code, content = row["code"], row["content"]
            if content == 'EMPTY':
                # New data unavailable: publish the cached tables (if any)
                # rather than making the whole entry unavailable.
                # BUGFIX: the yearly fallback used yearly_map[code][0] (the
                # date string); [1] is the cached table, as in the quarterly
                # branch below.
                field_w_code[code] = [
                    "#NEW DATA UNAVAILABLE#\n" + yearly_map[code][1]
                    if code in yearly_map else "-NO HISTORICAL DATA-",
                    "#NEW DATA UNAVAILABLE#\n" + quarterly_map[code][1]
                    if code in quarterly_map and len(quarterly_map[code]) == 2
                    else "-NO HISTORICAL DATA-"]
                continue

            try:
                s = re.search('【主要财务指标】([^【]*?)【', content).group(1)
                if s.count('财务指标') == 1:
                    # Only one table (yearly) is present.
                    section = re.search(
                        '【主要财务指标】[^】]*?(财务指标[^【]*?)【', content).group(1)
                    first_table = removeFirstCol(
                        _extract_table(section, regex).rstrip())
                    latest_date = re.search(date_re, first_table).group(1)
                    first_table, changed = _merge_into_map(
                        yearly_map, code, latest_date, first_table)
                    yearly_need_dump = yearly_need_dump or changed
                    field_w_code[code] = [first_table]
                else:
                    # First (yearly) table: text up to the second '财务指标'.
                    section = re.search(
                        '【主要财务指标】[^】]*?(财务指标[^】]*?)财务指标', content).group(1)
                    first_table = removeFirstCol(
                        _extract_table(section, regex).rstrip())
                    latest_date = re.search(date_re, first_table).group(1)
                    first_table, changed = _merge_into_map(
                        yearly_map, code, latest_date, first_table)
                    yearly_need_dump = yearly_need_dump or changed
                    field_w_code[code] = [first_table]
                    # Second (quarterly) table: keeps its first column.
                    section = re.search(
                        '【主要财务指标】[^】]*?财务指标[^【]*?(财务指标[^【]*?)【',
                        content).group(1)
                    second_table = _extract_table(section, regex).rstrip('\n')
                    latest_date = re.search(date_re, section).group(1)
                    second_table, changed = _merge_into_map(
                        quarterly_map, code, latest_date, second_table)
                    quarterly_need_dump = quarterly_need_dump or changed
                    field_w_code[code].append(second_table)
            except Exception as e:
                # Best-effort per row: log the failing code and keep going.
                print('Error occurred when processing f12 data')
                print(code, str(e))
        if quarterly_need_dump:
            _dump_map(quarterly_map, g_path + 'f12/f12_quarterlyTable')
        if yearly_need_dump:
            _dump_map(yearly_map, g_path + 'f12/f12_yearlyTable')
        return field_w_code


if __name__ == "__main__":
    # Quick manual check: print both tables for one stock code.
    tables = run('res.csv')['300314']
    print(tables[0])
    print('#####################################')
    print(tables[1])
