import csv
import sys
from zzz import *
from rich import inspect
# Corpus: every *.txt file in the current directory.
# NOTE(review): `os`, `file`, and `reg` (used later) are assumed to come from
# the `from zzz import *` wildcard above — confirm against the zzz module.
db = [i for i in os.listdir() if i.endswith("txt")]
# Full text of each corpus file, parallel to `db`.
textdb = [file.readstr(i) for i in db]
# Opening quote/bracket characters that begin a wrapped (quoted) span.
wrap_token = set("「『《“")
# Sentence terminators and closing quotes/brackets that end a span.
end_token = set("。！？…））］》』」”")
term_dict_path = "terms.csv"
# cn -> preferred rendering, falling back jp -> hira -> en -> cn itself.
term_dict = {}
with open(term_dict_path, "r", encoding="utf-8-sig") as f:
    filedata = list(csv.DictReader(f))
    for row in filedata:
        term_dict[row['cn']] = row['jp'] or row['hira'] or row['en'] or row['cn']


class tokenstate:
    """Parser states: not in a quoted span / inside one / just left one."""
    none, inside, outside = range(3)


# Global term -> occurrence-count accumulator, filled by do_ner().
terms = {}


def add_term(term, conversation):
    """Record one occurrence of *term* in the global `terms` counter.

    Terms flagged as conversation (dialogue quotes) are skipped, and so
    are terms that are empty after whitespace/newline normalization —
    previously blank quotes were counted under the empty-string key.
    """
    if conversation:
        return
    term = term.strip().replace("\n", "")
    if not term:
        # Skip blanks so stray empty quotes don't pollute the counts.
        return
    terms[term] = terms.get(term, 0) + 1


def do_ner():
    """Scan the corpus for quoted spans and print candidate terms with counts.

    Walks every article character by character.  Text between a
    wrap_token (opening quote) and an end_token is accumulated in
    `toks`; when the span closes it is recorded via add_term(), unless
    the quote opened at the start of a line (treated as dialogue).
    Finally prints all collected terms sorted by term.
    """
    for article in textdb:
        state = tokenstate.none
        toks = []
        sent_begin = False
        conversation = False
        for char in article:
            newstate = tokenstate.none
            if char == "\n":
                sent_begin = True
            elif sent_begin:
                # A quote right at the start of a line marks dialogue,
                # which add_term() ignores.
                if char in wrap_token:
                    conversation = True
                sent_begin = False
            if char in wrap_token:
                # Enter a wrapped span and restart collection.  (The
                # original if/else here had two identical branches —
                # collapsed into one.)
                newstate = tokenstate.inside
                toks.clear()
            elif char in end_token:
                if state != tokenstate.inside:
                    newstate = tokenstate.outside
                else:
                    # Span just closed: emit the collected term.
                    term = "".join(toks)
                    add_term(term, conversation)
                    conversation = False
                    newstate = tokenstate.outside
                    toks.clear()
            else:
                toks.append(char)
                newstate = state
            state = newstate
    # Print term/count pairs sorted by term.
    for item in sorted(terms.items(), key=lambda x: x[0]):
        print(*item)


class trie:
    """Character trie for tallying the suffix strings collected by do_find().

    Nodes are plain dicts: the "count" key holds how many inserts passed
    through the node, every other key is a single character mapping to a
    child node dict.
    """

    def __init__(self) -> None:
        # Root node.  NOTE(review): add() never bumps the root's "count",
        # so it stays 0; count()'s root pruning appears to rely on that.
        self.data = {"count": 0}

    def add(self, value):
        # Walk/extend the path for `value`, incrementing the count of
        # every node along the path (new nodes start at 1).
        p = self.data
        for i in value:
            if i not in p:
                p[i] = {"count": 1}
            else:
                p[i]["count"] += 1
            p = p[i]

    def count(self, cnt=None, prefix="", p=None):
        """Collect branch-point counts into `cnt` (prefix -> count).

        Chains of single-child nodes ("bridges") are compressed into a
        single entry.  At the root, single-character prefixes and
        zero-count entries are pruned before returning.
        """
        cnt = cnt or {}
        root = False
        if not p:
            root = True
            # go to next if root is a bridge
            if len(self.data.keys()) == 2:
                # Root has "count" plus exactly one child character:
                # descend one level so bridge compression starts there.
                key = list(self.data.keys())
                if key[0] == 'count':
                    p = key[1]
                else:
                    p = key[0]
                prefix = prefix+p
                p = self.data[p]
            else:
                p = self.data
        l = len(list(p.keys()))
        # print(prefix, l, p.keys())
        if l == 1:
            # Leaf: only the "count" key remains.  NOTE(review): the
            # assert insists leaves are reached only with an empty
            # prefix — confirm this invariant actually holds.
            assert prefix == "", f"leaf at {prefix} is {p.keys()}"
            return cnt
        if l == 2:
            # Bridge: exactly one child — locate it and keep compressing.
            c, child = None, None
            for k, v in p.items():
                if k == 'count':
                    continue
                else:
                    child = v
                    c = k
                    break
            assert c
            assert child
            if len(child.keys()) > 1:
                self.count(cnt, prefix+c, child)
            else:
                # Child is a leaf; record the bridge's own count.
                cnt[prefix+c] = p['count']
            return cnt
        # Branch point: record this prefix, then recurse into children.
        cnt[prefix] = p['count']
        for k, v in p.items():
            if type(v) is dict:
                if len(v.keys()) > 1:
                    self.count(cnt, prefix + k, v)
                else:
                    cnt[prefix+k] = v['count']
        if root:
            # Final pruning: drop single-character prefixes and
            # zero-count entries (e.g. the root's own zero).
            for k in list(cnt.keys()):
                if len(k) == 1:
                    del cnt[k]
                elif cnt[k] == 0:
                    del cnt[k]
        return cnt

    def prefix(self):
        '''
        find the longest prefix of the trie

        Follows the chain of single-child nodes down from the root and
        returns the concatenated characters; stops at the first leaf or
        branch point.
        '''
        root = self.data
        current = root
        prefix = []
        while current:
            keys = current.keys()
            if len(keys) != 2:
                # Leaf (just "count") or branch point: stop.
                break
            for k in keys:
                if k == 'count':
                    continue
                else:
                    current = current[k]
                    prefix.append(k)
                    break
        return "".join(prefix)


def do_find(head, min_count=0):
    """Collect what follows *head* in the corpus and print suffix counts.

    Suffixes (up to 10 chars, stopping at punctuation) are aggregated in
    a trie; only suffixes seen more than `min_count` times are printed,
    followed by the longest common suffix prefix and a total.
    """
    threshold = int(min_count)
    head = head.strip().replace("\n", "")
    assert head
    suffixes = trie()
    pattern = f"(?<={head})[^。，、…！？：“『』「」—*]{{0,10}}|$"
    for text in textdb:
        hits = reg.search(text, pattern)
        if not hits:
            continue
        for hit in hits:
            suffixes.add(hit)
    counted = suffixes.count()
    common = suffixes.prefix()
    # Stable two-pass sort: primary ascending by suffix; ties keep the
    # count-descending order established by the first pass.
    ranked = sorted(counted.items(), key=lambda kv: kv[1], reverse=True)
    ranked.sort(key=lambda kv: kv[0])
    total = 0
    for suffix, n in ranked:
        if n > threshold:
            total += n
            print(head+suffix, '\t\t', n)
    print(f"prefix {head+common}, count {total}/{len(ranked)}")


def do_opencc():
    '''
    著-> 着

    Rewrites 著 as 着 wherever a tw2s(s2tw(...)) round-trip maps that
    position to 着, then writes the updated text back to disk.
    '''
    import opencc
    to_simplified = opencc.OpenCC('tw2s.json')
    to_traditional = opencc.OpenCC('s2tw.json')
    for idx, name in enumerate(db):
        original = textdb[idx]
        roundtrip = to_simplified.convert(to_traditional.convert(original))
        # Index into roundtrip by position, mirroring the original
        # per-character comparison.
        fixed = "".join(
            "着" if ch == "著" and roundtrip[pos] == "着" else ch
            for pos, ch in enumerate(original)
        )
        textdb[idx] = fixed
        file.write(name, fixed)


def find_term():
    """Print every dictionary term that appears nowhere in the corpus."""
    not_found = []
    for term in term_dict:
        print(f"test {term}")
        # any() short-circuits on the first article containing the term,
        # replacing the original flag-and-break loop.
        if not any(term in article for article in textdb):
            not_found.append(term)
    print(not_found)


def do_count():
    """Print rare characters (<=10 occurrences) whose tw2s form differs.

    Tallies every character in the corpus, walks the tally in ascending
    frequency, and prints characters that OpenCC tw2s would convert —
    i.e. likely stray traditional-form characters.
    """
    import opencc
    from collections import Counter
    cvt = opencc.OpenCC('tw2s')
    # Counter replaces the hand-rolled per-character dict tally.
    cnt = Counter()
    for article in textdb:
        cnt.update(article)
    for char, freq in sorted(cnt.items(), key=lambda x: x[1]):
        if freq > 10:
            # Ascending order: everything after this is more frequent.
            break
        if cvt.convert(char) != char:
            print(char, end="")


def do_collect():
    '''
    collect item terms

    Splits the corpus into lines and tallies short (<=10 chars) spans
    wrapped in 「」 / 『』 quotes, then prints them by descending
    frequency.
    '''
    start_token = list('「『')
    endd_token = list('」』')
    state = tokenstate.none
    # NOTE(review): lstate is not reset per line, so a span's close may
    # be judged against state from the previous line — confirm intended.
    lstate = tokenstate.none
    t = {}
    arr = []
    alllines = []
    for article in textdb:
        alllines.extend(article.splitlines())
    for line in alllines:
        if not line:
            continue
        state = tokenstate.none
        # NOTE(review): the first character of each line is skipped.
        for char in line[1:]:
            if char in start_token:
                state = tokenstate.inside
                arr.clear()
            elif char in endd_token:
                state = tokenstate.outside
            else:
                arr.append(char)
                pass
            if state == lstate:
                # No state transition: keep accumulating.
                continue
            if state == tokenstate.outside:
                # A span just closed; keep it only if reasonably short.
                tt = ""
                if len(arr) <= 10:
                    tt = ''.join(arr)
                arr.clear()
                if tt:
                    if tt in t:
                        t[tt] += 1
                    else:
                        t[tt] = 1
            lstate = state
    sort_arr = sorted(t.items(), key=lambda x: x[1], reverse=True)
    for i in sort_arr:
        print(i[0], '\t'*8, i[1])


class mstate:
    """Matcher states for the fuzzy machine built by create_machine()."""
    none, match, miss = range(3)


def create_machine(q):
    '''
    Build a state machine that fuzzily matches `q`, tolerating
    mismatches from the second character onward.
    e.g. abc -> adc, abd

    Returns a function text -> set of matched substrings (prefixes of
    `q` that were matched).  NOTE(review): do_fussy_match documents
    this matcher as broken — treat results with suspicion.
    '''
    nodes = list(q)
    l = len(nodes)
    # At most half the characters (rounded up) may mismatch.
    half = (l+1)//2

    def fn(text):
        matches = set()
        state = mstate.none
        index = 0
        misses = 0
        for i in text:
            if index == l:
                # complete match or miss match
                matches.add("".join(nodes[:index]))
                index = 0
                misses = 0
                state = mstate.none
            elif i == nodes[index]:
                # partial match
                index += 1
                state = mstate.match
            elif state == mstate.match:
                # first miss
                misses = 1
                index += 1
                state = mstate.miss
            elif state == mstate.miss:
                # more miss
                if misses >= half:
                    if index > 0:
                        print(f"fail at {i}={nodes[:index]}")
                    # fail
                    index = 0
                    misses = 0
                    state = mstate.none
                else:
                    index += 1
                    misses += 1
            # If mid-miss and the current char restarts the pattern,
            # record the partial match and start over from position 1.
            if state == mstate.miss and i == nodes[0]:
                matches.add("".join(nodes[:index]))
                state = mstate.match
                index = 1
                misses = 0

        return matches
    return fn


def do_fussy_match(q):
    '''
    broken

    Runs the fuzzy matcher from create_machine(q) over every article
    and prints the union of all matches.
    '''
    matcher = create_machine(q)
    found = set()
    for text in textdb:
        found.update(matcher(text))
    for hit in found:
        print(hit)


if __name__ == '__main__':
    cmd = sys.argv[1]
    args = sys.argv[2:]
    # Command dispatch table; unknown commands fall through silently,
    # matching the original if/elif chain.
    dispatch = {
        'f': lambda: do_find(*args),
        't': lambda: find_term(),
        'c': lambda: do_collect(),
        'm': lambda: do_fussy_match(*args),
    }
    action = dispatch.get(cmd)
    if action:
        action()
    # do_opencc()
    # do_count()
