import paddle
import random
import collections
import collections.abc
import os, sys, six, re, json
import numpy as np
from collections import defaultdict


def search(pattern, sequence):
    """Find every occurrence of the sub-sequence ``pattern`` in ``sequence``.

    Returns:
        A list of all start indices ``i`` such that
        ``sequence[i:i + len(pattern)] == pattern`` (empty list when
        there is no match).

    Note: the original docstring claimed "first index or -1", but the
    code has always collected *all* match positions; the docstring is
    corrected here to match the actual behavior.
    """
    n = len(pattern)
    return [i for i in range(len(sequence)) if sequence[i:i + n] == pattern]


#
# def biaffine_collate(batch):
#     inputs_ids, mask, labels = list(zip(*[x.values() for x in batch]))
#     inputs_ids = torch.from_numpy(sequence_padding(inputs_ids))
#     mask = torch.from_numpy(sequence_padding(mask))
#     labels = torch.from_numpy(np.array([x for y in labels for x in y], dtype='int64'))
#     return {'input_ids': inputs_ids, 'attention_mask': mask, 'labels': labels}


def seed_everything(seed):
    """Seed every random-number source in use (Python, hashing, numpy,
    paddle) so runs are reproducible."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    paddle.seed(seed)

def write_result(file, best_epoch, best):
    """Print the best epoch/metrics and append them to ``result.txt``.

    Args:
        file: path of the calling script; its base name (extension
            removed) tags the line written to ``result.txt``.
        best_epoch: epoch index that achieved the best metrics.
        best: best metric value(s); formatted with ``str``.
    """
    print_str = 'best epoch: {}\tbest_metrics: {}'.format(best_epoch, best)
    print(print_str)
    # splitext strips any extension; the original sliced off exactly the
    # last three characters, silently corrupting names without a ".py"
    # style 3-character suffix.
    file_name = os.path.splitext(os.path.split(file)[-1])[0]
    write_str = file_name + '\t' + print_str + '\n'
    with open('result.txt', 'a+') as f:
        f.write(write_str)

def read_json_lines(file):
    """Read a JSON-Lines file (one JSON document per line).

    Returns:
        A list of the decoded objects, in file order.

    The original left the file handle to be closed by the garbage
    collector; ``with`` closes it deterministically.
    """
    with open(file, encoding='utf-8') as f:
        return [json.loads(line) for line in f]

def write_json_lines(lines, out_path):
    """Serialize each item of ``lines`` as one JSON document per line
    (JSON-Lines format, UTF-8, non-ASCII kept as-is)."""
    with open(out_path, 'w+', encoding='utf-8') as f:
        f.writelines(json.dumps(item, ensure_ascii=False) + '\n' for item in lines)

def build_vocab(inp):
    """Count token frequencies over an iterable of token sequences.

    Returns:
        A ``defaultdict(int)`` mapping token -> occurrence count
        (missing tokens read as 0).
    """
    word_freq = collections.defaultdict(int)
    for token in (tok for seq in inp for tok in seq):
        word_freq[token] += 1
    return word_freq

def prepare(args):
    """Ensure the cache/log/save directories named in ``args`` exist.

    Args:
        args: object with ``cache_dir``, ``log_dir`` and ``save_dir``
            attributes (e.g. an argparse namespace).

    ``os.makedirs(..., exist_ok=True)`` replaces the original
    exists()+mkdir pair: it is race-free (no crash if another process
    creates the directory in between) and also creates missing parent
    directories.
    """
    for path in (args.cache_dir, args.log_dir, args.save_dir):
        os.makedirs(path, exist_ok=True)

def pickle_save(obj, name):
    """Persist ``obj`` to the file at path ``name`` via pickle (binary)."""
    from pickle import dump
    with open(name, 'wb+') as fh:
        dump(obj, fh)

def pickle_load(name):
    """Load and return the pickled object stored at path ``name``."""
    from pickle import load
    with open(name, 'rb') as fh:
        return load(fh)

def string_matching(s, keywords):
    """Return True iff ``s`` matches at least one of the regex patterns
    in ``keywords`` (``re.search`` semantics, so matches anywhere)."""
    return any(re.search(pattern, s) for pattern in keywords)

def parallel_apply(func, iterable, workers, max_queue_size, callback=None, dummy=False, random_seeds=True):
    """Apply ``func`` to every element of ``iterable`` with multiple
    processes or threads.

    Note that this apply is asynchronous and unordered: feeding inputs
    a, b, c may produce func(c), func(a), func(b) in any order.  When
    ``callback`` is None, all results are collected, re-sorted by input
    index and returned as a list; otherwise ``callback`` is invoked on
    each single output and the function returns None.

    Args:
        func: single-argument function applied to each element.
        iterable: the inputs.
        workers: number of worker processes/threads.
        max_queue_size: capacity of the input queue; also used as the
            interval (in inputs) at which the output queue is drained.
        callback: callback that handles one output at a time.
        dummy: False -> multiprocessing, True -> multithreading.
        random_seeds: numpy random seed per worker; ``True`` gives each
            worker a ``None`` seed (fresh entropy), ``False``/``None``
            disables re-seeding, or pass an explicit list of seeds.
    """
    if dummy:
        # multiprocessing.dummy mirrors the Pool API with threads.
        from multiprocessing.dummy import Pool, Queue
    else:
        from multiprocessing import Pool, Queue

    # in_queue carries (index, item) to workers; out_queue carries
    # (index, result) back; seed_queue hands one numpy seed per worker.
    in_queue, out_queue, seed_queue = Queue(max_queue_size), Queue(), Queue()
    if random_seeds is True:
        random_seeds = [None] * workers
    elif random_seeds is None or random_seeds is False:
        random_seeds = []
    for seed in random_seeds:
        seed_queue.put(seed)

    def worker_step(in_queue, out_queue):
        """Wrap the single-step function into an endless worker loop.
        """
        # Each worker re-seeds numpy once before consuming any input.
        if not seed_queue.empty():
            np.random.seed(seed_queue.get())
        while True:
            i, d = in_queue.get()
            r = func(d)
            out_queue.put((i, r))

    # Start the pool; worker_step is passed as the pool initializer, so
    # every worker loops over the queues forever (until terminate()).
    pool = Pool(workers, worker_step, (in_queue, out_queue))

    if callback is None:
        results = []

    # Drain whatever currently sits in the output queue; returns how
    # many results were consumed so the caller can track completion.
    def process_out_queue():
        out_count = 0
        for _ in range(out_queue.qsize()):
            i, d = out_queue.get()
            out_count += 1
            if callback is None:
                results.append((i, d))
            else:
                callback(d)
        return out_count

    # Feed inputs and collect outputs, interleaved so neither queue
    # grows without bound.
    in_count, out_count = 0, 0
    for i, d in enumerate(iterable):
        in_count += 1
        while True:
            try:
                in_queue.put((i, d), block=False)
                break
            except six.moves.queue.Full:
                # Input queue full: drain some results, then retry.
                out_count += process_out_queue()
        if in_count % max_queue_size == 0:
            out_count += process_out_queue()

    # Wait until every input has produced exactly one output.
    while out_count != in_count:
        out_count += process_out_queue()

    pool.terminate()

    if callback is None:
        # Restore the original input order before returning.
        results = sorted(results, key=lambda r: r[0])
        return [r[1] for r in results]


def sequence_padding(inputs, length=None, value=0, seq_dims=1, mode='post', dtype='int64'):
    """Numpy helper: pad a batch of sequences to a common length.

    Args:
        inputs: list of array-likes to pad and stack.
        length: target size per padded dimension; when None it is
            inferred as the per-dimension maximum over ``inputs``.
        value: constant fill value.
        seq_dims: number of leading dimensions to pad/truncate.
        mode: 'post' appends padding, 'pre' prepends it.
        dtype: dtype of the stacked result array.

    Raises:
        ValueError: if ``mode`` is neither 'post' nor 'pre'.
    """
    if length is None:
        length = np.max([np.shape(x)[:seq_dims] for x in inputs], axis=0)
    elif not hasattr(length, '__getitem__'):
        length = [length]

    # Slice object that truncates each sample to the target length
    # along every padded dimension.
    trim = [np.s_[:length[dim]] for dim in range(seq_dims)]
    trim = trim[0] if len(trim) == 1 else tuple(trim)

    padded = []
    for sample in inputs:
        sample = sample[trim]
        pad_width = [(0, 0)] * len(np.shape(inputs[0]))
        for dim in range(seq_dims):
            gap = length[dim] - np.shape(sample)[dim]
            if mode == 'post':
                pad_width[dim] = (0, gap)
            elif mode == 'pre':
                pad_width[dim] = (gap, 0)
            else:
                raise ValueError('"mode" argument must be "post" or "pre".')
        padded.append(np.pad(sample, pad_width, 'constant', constant_values=value))

    return np.array(padded, dtype=dtype)

def segment_text(text, maxlen, seps='\n', strips=None):
    """Recursively split ``text`` into short pieces at the separators
    in ``seps``, aiming for pieces no longer than ``maxlen``.

    Args:
        text: the text to segment.
        maxlen: target maximum length of each piece.
        seps: separator characters, most-preferred first; each level of
            recursion splits on ``seps[0]`` and passes ``seps[1:]`` down.
        strips: characters stripped from both ends (in addition to
            whitespace) before splitting.

    Returns:
        A list of text pieces.  NOTE(review): a single piece with no
        remaining separator can still exceed ``maxlen``.
    """
    text = text.strip().strip(strips)
    if seps and len(text) > maxlen:
        pieces = text.split(seps[0])
        text, texts = '', []
        for i, p in enumerate(pieces):
            # Once appending the next piece would overflow maxlen,
            # flush the accumulated chunk through the lower-priority
            # separators and start a new chunk.
            if text and p and len(text) + len(p) > maxlen - 1:
                texts.extend(segment_text(text, maxlen, seps[1:], strips))
                text = ''
            if i + 1 == len(pieces):
                text = text + p
            else:
                # Re-attach the separator that split() removed.
                text = text + p + seps[0]
        if text:
            texts.extend(segment_text(text, maxlen, seps[1:], strips))
        return texts
    else:
        return [text]

def longest_common_substring(source, target):
    """Longest common substring (longest contiguous slice shared by
    ``source`` and ``target``).

    Returns:
        (length, span) where span is the 4-tuple
        (source_start, source_end, target_start, target_end) of one
        occurrence.  Ties exist; only one span is reported.
    """
    table = defaultdict(int)
    best_len, best_span = 0, (0, 0, 0, 0)
    for i, ch_s in enumerate(source, 1):
        for j, ch_t in enumerate(target, 1):
            if ch_s != ch_t:
                continue
            # Extend the diagonal run ending at (i-1, j-1).
            run = table[i - 1, j - 1] + 1
            table[i, j] = run
            if run > best_len:
                best_len, best_span = run, (i - run, i, j - run, j)
    return best_len, best_span

def longest_common_subsequence(source, target):
    """Longest common subsequence (not necessarily contiguous) of
    ``source`` and ``target``.

    Returns:
        (length, mapping) where mapping is a list of
        (source_index, target_index) pairs describing one maximal
        alignment, in increasing index order.  Other alignments of the
        same length may exist; only one is reported.
    """
    # Standard DP: c[i, j] is the LCS length of source[:i] and
    # target[:j] (1-based); defaultdict(int) supplies the zero border.
    c = defaultdict(int)
    for i, si in enumerate(source, 1):
        for j, tj in enumerate(target, 1):
            if si == tj:
                c[i, j] = c[i - 1, j - 1] + 1
            elif c[i, j - 1] > c[i - 1, j]:
                c[i, j] = c[i, j - 1]
            else:
                c[i, j] = c[i - 1, j]
    # Backtrack from the bottom-right corner; here i, j are 0-based
    # positions into source/target, while c is indexed 1-based.
    l, mapping = c[len(source), len(target)], []
    i, j = len(source) - 1, len(target) - 1
    while len(mapping) < l:
        if source[i] == target[j]:
            mapping.append((i, j))
            i, j = i - 1, j - 1
        elif c[i + 1, j] > c[i, j + 1]:
            # Dropping target[j] keeps the LCS length, so move left.
            j = j - 1
        else:
            i = i - 1
    return l, mapping[::-1]

def paddle_collate(batch):
    """Collate a batch of example dicts into a dict of padded paddle
    tensors (all examples are assumed to share the same keys)."""
    import paddle
    keys = batch[0].keys()
    columns = zip(*[example.values() for example in batch])
    tensors = [paddle.to_tensor(sequence_padding(col)) for col in columns]
    return dict(zip(keys, tensors))
def mask_select(inputs, mask):
    """Select the entries of ``inputs`` whose flattened ``mask`` value
    is truthy.

    When ``inputs`` has more dimensions than ``mask``, the trailing
    dimensions are kept: the result has shape (n_selected, -1).
    Otherwise both are flattened and a 1-D selection is returned.
    """
    flat_mask = mask.flatten().astype('bool')
    if inputs.ndim > mask.ndim:
        return inputs.reshape([flat_mask.shape[0], -1])[flat_mask]
    return inputs.flatten()[flat_mask]

class TrieNode:
    """One node of a Trie; children are created on demand."""

    def __init__(self):
        # Looking up a missing character automatically creates a child.
        self.children = collections.defaultdict(TrieNode)
        # True when the root-to-here path spells a complete word.
        self.is_w = False


class Trie:
    """Prefix tree over a word list, with word-id lookup for lexicon
    matching."""

    def __init__(self, w_list=None):
        """Build the trie from ``w_list`` (iterable of words).

        Bug fix: the original ran ``enumerate(w_list)`` unconditionally
        and therefore raised TypeError when ``w_list`` was None, even
        though None is the documented default.
        """
        self.root = TrieNode()
        if w_list is None:
            w_list = []
        for w in w_list:
            self.insert(w)
        # Word -> index; duplicate words keep their last index.
        self.vocab2id = {v: i for i, v in enumerate(w_list)}

    def insert(self, w):
        """Add the word ``w`` to the trie."""
        current = self.root
        for c in w:
            current = current.children[c]
        current.is_w = True

    def search(self, w):
        """Classify ``w`` against the trie.

        Returns:
            -1: ``w`` is not a path in the trie,
             0: ``w`` is a proper prefix of some word but not a word,
             1: ``w`` is a stored word.
        """
        current = self.root
        for c in w:
            current = current.children.get(c)
            if current is None:
                return -1
        return 1 if current.is_w else 0

    def get_lexicon(self, sentence):
        """Find every stored word occurring in ``sentence``.

        Returns:
            A list of ``[start, end, word_id]`` triples where start/end
            are inclusive character indices into ``sentence``.
        """
        result = []
        for i in range(len(sentence)):
            current = self.root
            for j in range(i, len(sentence)):
                current = current.children.get(sentence[j])
                if current is None:
                    break
                if current.is_w:
                    result.append([i, j, self.vocab2id[sentence[i:j + 1]]])
        return result
