# Re-uses the data source of f10, so there is no separate f10a.py.
# Sections handled: latest announcements (最新公告), latest reports (最新报道),
# earnings forecasts (业绩预告), special treatment (特别处理).
import csv
import pickle
import re
from collections import OrderedDict

from common import bin_storage


class ContentHandler:
    """Extract notable F10 sections (announcements, reports, forecasts, ...)
    from raw content strings.

    Keeps a rolling history of up to three distinct messages per field in
    ``triplet_map`` (persisted as a pickle by the caller) and renders them
    into three display columns per stock code in ``_res``.
    """

    def __init__(self, g_path):
        """Load the persisted triplet map from *g_path*, or start fresh.

        g_path: directory prefix where 'f10a_triplet_map.pkl' lives.
        """
        # code -> [col1, col2, col3] rendered display strings
        self._res = dict()
        # set whenever triplet_map changes, so the caller knows to re-dump it
        self.triplet_need_dump = False
        try:
            # test f10a/
            # NOTE(review): pickle.load is unsafe on untrusted data; acceptable
            # here only because the map is produced by this script itself.
            with open(g_path + "f10a_triplet_map.pkl", 'rb') as f:
                self.triplet_map = pickle.load(f)
        except FileNotFoundError:
            print('no triplet map')
            self.triplet_map = dict()
            self.triplet_need_dump = True
        # (compiled pattern, display label); group(1) captures the message body
        self.regex = [
            (re.compile('★最新公告[:：]?([^【★]*?)[【★]{1}'), "【公告】"),
            (re.compile('★最新报道[:：]?([^【★]*?)[【★]{1}'), "【报道】"),
            (re.compile('【业绩预告】[:：]?([^【★]*?)[【★]{1}'), '【预告】'),
            (re.compile('★特别处理[:：]?([^★└]+)'), '【处理】'),
            (re.compile('增发】[:：]?([^【★]*?)[【★]{1}'), '【增发】'),
            (re.compile('★股东户数变化[:：]?([^【★]*?)[【★]{1}'), '【户数变化】')
        ]

    def _removeDate(self, s):
        """Strip the trailing update-date suffix (e.g. '@01-01') from *s*."""
        return re.sub(r'@[\d]{2}-[\d]{2}$', '', s)

    def _appendToField(self, ordered_dict, field, s):
        """Append *s* to ordered_dict[field] unless it duplicates the latest
        entry (ignoring the '@MM-DD' date suffix); keep at most 3 entries.
        """
        if field not in ordered_dict:
            ordered_dict[field] = []
        triplet = ordered_dict[field]
        # Each msg carries an update date '@01-01'; compare without it so a
        # re-dated duplicate is not appended again.
        # BUG FIX: the original tested `len(triplet) > 1`, which silently
        # dropped every new distinct message while the list held exactly one
        # entry; any non-empty list must be compared against its last item.
        if not triplet or self._removeDate(triplet[-1]) != self._removeDate(s):
            self.triplet_need_dump = True
            triplet.append(s)
        # cap the history at the three most recent messages
        while len(triplet) > 3:
            triplet.pop(0)

    def _tripletToRes(self, code):
        """Render triplet_map[code] into three column strings in _res[code].

        Column i concatenates the i-th message of every field, each prefixed
        with its field label and separated by blank lines.
        """
        ordered_dict = self.triplet_map[code]
        s123 = ['', '', '']
        for field in ordered_dict:
            msg_list = ordered_dict[field]
            for i in range(len(msg_list)):
                if len(s123[i]) > 0:
                    s123[i] += '\n\n'
                s123[i] += field + ':' + msg_list[i]
        self._res[code] = s123

    def _filter(self, s):
        """Drop box-drawing/filler characters and all whitespace from *s*."""
        return re.sub(r"[｜┬┴─┼├┤└┘┌┐\s]+", "", s).strip()

    def _flag1(self, code, content):
        """Process one flagged row: update the map and rebuild _res[code].

        A literal "EMPTY" content means the fresh fetch failed; render the
        preserved history and mark the last column as stale.
        """
        if content == "EMPTY":
            # build repr strings and note that there is a problem with the new data
            self._tripletToRes(code)
            self._res[code][-1] = 'NEW DATA UNAVAILABLE\n' + self._res[code][-1]
            return
        # apply new info to the preserved map
        self.extractor(self.triplet_map[code], content)
        # build strings for the three cols from the map
        self._tripletToRes(code)

    def extractor(self, ordered_dict, content):
        """Run every section regex over *content* and append the matches
        (suffixed with the content's update date) into *ordered_dict*.
        """
        # NOTE(review): raises AttributeError if the update-date marker is
        # missing from content — original behaved the same; confirm upstream
        # always includes '更新日期:YYYY-MM-DD◇'.
        update_date = '@' + self._filter(
            re.search(r"更新日期[：:][\d]{4}-([\d]{2}-[\d]{2})◇", content).group(1))

        for reg, name in self.regex:
            if name == '【增发】':
                # the 增发 section may repeat; join all occurrences into one msg
                matches = re.findall(reg, content)
                if matches:
                    s = ''
                    for m in matches:
                        s += self._filter(m) + '\n'
                    s += update_date
                    self._appendToField(ordered_dict, name, s)
            else:
                m = re.search(reg, content)
                if m:
                    self._appendToField(ordered_dict, name, self._filter(m.group(1)) + update_date)


# find interesting parts of a given raw csv
def run(path_to_process, g_path="/home/ubuntu/f10_serverside/"):
    """Process a raw F10 csv dump and persist the results.

    Reads rows (columns: code, content, flag) from *path_to_process*, feeds
    flag=='1' rows through a ContentHandler, re-dumps the triplet map (with a
    dated backup) when it changed, and saves the rendered strings into a
    dated binary store under *g_path*/bin/.

    path_to_process: path to the csv file to process.
    g_path: working directory holding the pickle map and the 'bin/' output.
    """
    # imported once here instead of twice mid-function as before
    from datetime import datetime
    from shutil import copyfile

    with open(path_to_process, newline='', encoding="utf-8-sig", errors="ignore") as csvfile:
        reader = csv.DictReader(csvfile)
        handler = ContentHandler(g_path)

        for row in reader:
            code, content, flag = row["code"], row["content"], row["flag"]
            # only rows flagged '1' carry processable content
            if flag != '1':
                continue
            if code not in handler.triplet_map:
                handler.triplet_map[code] = OrderedDict()
                handler.triplet_need_dump = True

            handler._flag1(code, content)

        if handler.triplet_need_dump:
            # test f10a/
            with open(g_path + 'f10a_triplet_map.pkl', 'wb') as f:
                pickle.dump(handler.triplet_map, f, protocol=pickle.HIGHEST_PROTOCOL)
            # keep a dated backup of the map alongside the live copy
            copyfile(g_path + 'f10a_triplet_map.pkl',
                     g_path + 'f10a_triplet_map{}.pkl'.format(datetime.today().strftime('%Y-%m-%d')))

        ana_res = handler._res
        # renamed from `bin`, which shadowed the builtin of the same name
        storage = bin_storage.Bin_storage()
        save_bin_name = g_path + 'bin/' + datetime.today().strftime('%Y-%m-%d')
        storage.save(save_bin_name, ana_res)

        print("--------------------- f10a finished.  --------------------")


if __name__ == '__main__':
    # ad-hoc local run against a dated dump in the current directory
    run("res_2020-07-15.csv", g_path="")
