# """自动识别正则表达式"""

import re
import logging
from collections import defaultdict

logger = logging.getLogger("autore")


def DBC2SBC(ustring):
    """Convert full-width characters in *ustring* to their half-width forms.

    The ideographic space (U+3000) maps to an ASCII space; other full-width
    forms map down by 0xFEE0. Characters without a half-width counterpart are
    kept unchanged.
    """
    rstring = ""
    for uchar in ustring:
        inside_code = ord(uchar)
        if inside_code == 0x3000:
            inside_code = 0x0020
        else:
            inside_code -= 0xfee0
        # FIX: the lower bound must include 0x0020, otherwise the U+3000 -> space
        # conversion above is discarded and the full-width space is kept as-is.
        if not (0x0020 <= inside_code <= 0x7e):
            rstring += uchar
            continue
        rstring += chr(inside_code)
    return rstring


def SBC2DBC(ustring):
    """Convert half-width (ASCII) characters in *ustring* to full-width forms.

    A space (U+0020) maps to the ideographic space U+3000; printable ASCII
    (U+0021-U+007E) shifts up by 0xFEE0. Other characters are kept as-is.
    """
    rstring = ""
    for uchar in ustring:
        inside_code = ord(uchar)
        if inside_code == 0x0020:
            inside_code = 0x3000
        else:
            if not (0x0021 <= inside_code <= 0x7e):
                rstring += uchar
                continue
            # FIX: the shift must happen only in this branch; previously it ran
            # after the if/else, so a space became chr(0x3000 + 0xfee0) garbage.
            inside_code += 0xfee0
        rstring += chr(inside_code)
    return rstring


CH_ESCAPE = {"*", "\'", "\"", "[", "]", ".", "(", ")", "{", "}", "\\", "?", "+", "^", "$", "|", "@", "#", "!", ",",
             "_", "%", "&", "\/", ";", "=", ":"}
CH_DIGITS = '0123456789'
CH_PUNCTUATION = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
CH_WHITESPACE = ' \t\n\r\v\f　'
CH_SYMBOL = CH_PUNCTUATION + CH_WHITESPACE
CH_SYMBOL += SBC2DBC(CH_SYMBOL)
CH_SYMBOL += """ 。 ？ ！ ， 、 ； ： “ ” ‘ ' （ ） 《 》 〈 〉 【 】 『 』 「 」 ﹃ ﹄ 〔 〕 … — ～ ﹏ ￥"""

# Opening and closing quote characters.
CH_QUOTE_L = "'\"‘“"
CH_QUOTE_R = "'\"’”"
# Map each opening quote to its closing counterpart.
# FIX: the curly double-quote entry was inverted ('”': '“'); keys must be the
# opening quotes, consistent with CH_QUOTE_L and with CH_ENCLOSE_MAP.
CH_QUOTE_MAP = {
    "'": "'",
    "\"": "\"",
    "‘": "’",
    "“": "”",
}

# Opening/closing enclosure characters (brackets and quotes).
CH_ENCLOSE_L = "(<{['\"‘“"
CH_ENCLOSE_R = ")>}]'\"’”"
# Map each opening enclosure character to its closing counterpart.
CH_ENCLOSE_MAP = {
    "'": "'",
    "\"": "\"",
    "‘": "’",
    "“": "”",
    "(": ")",
    "<": ">",
    "[": "]",
    "{": "}",
}

# Extend both character sets with their full-width variants.
CH_ENCLOSE_L += SBC2DBC(CH_ENCLOSE_L)
CH_ENCLOSE_R += SBC2DBC(CH_ENCLOSE_R)

# Build the full-width counterpart of every map entry and merge it in.
DBC_ENCLOSE_MAP = dict()
for k, v in CH_ENCLOSE_MAP.items():
    DBC_ENCLOSE_MAP[SBC2DBC(k)] = SBC2DBC(v)
CH_ENCLOSE_MAP.update(DBC_ENCLOSE_MAP)

# Strategies for matching a run of digits:
DIGITS_BY_SEPARATOR = 0  # use a negated separator character class, e.g. [^ ]
DIGITS_LENGTH = 1  # match by the number of digit characters
DIGITS_EXACTLY = 2  # exact match (inferred from the *_EXACTLY naming)
DIGITS_BY_WORD = 3  # represent the digits with a word-style pattern

# Strategies for matching a word:
WORD_BY_SEPARATOR = 0  # use a negated separator character class, e.g. [^ ]
WORD_LENGTH = 1  # match by the number of word characters
WORD_EXACTLY = 2  # exact match (inferred from the *_EXACTLY naming)

# Strategies for matching symbols:
SYMBOL_BY_SEPARATOR = 0  # use a negated separator character class, e.g. [^ ]
SYMBOL_LENGTH = 1  # match by the number of symbol characters
SYMBOL_EXACTLY = 2  # exact match (inferred from the *_EXACTLY naming)
SYMBOL_BY_WORD = 3  # represent the symbols with a word-style pattern

# Strategies for matching separators:
SEPARATOR_FUZZY = 0  # fuzzy match
SEPARATOR_LENGTH = 1  # match by character count
SEPARATOR_EXACTLY = 2  # exact match

# Overall match modes:
MATCH_FUZZY = 0  # fuzzy match
MATCH_LENGTH = 1  # match by character count (original comment was a copy of MATCH_FUZZY's)
MATCH_EXACTLY = 2  # exact match


def iterable2escape(it, order=False):
    """Join the characters of *it* into one string, backslash-escaping any
    regex-special character from CH_ESCAPE.

    :param it: iterable of single characters
    :param order: when False (default) duplicates are dropped and order is
        undefined (the characters go through a set); when True the input order
        and duplicates are preserved
    """
    chars = it if order else set(it)
    return "".join("\\" + ch if ch in CH_ESCAPE else ch for ch in chars)


def separator_find(s, symbols=CH_SYMBOL):
    """Return the set of characters of *s* that appear in *symbols*."""
    return {ch for ch in s if ch in symbols}


def add_name_group(pattern, name=None):
    """Wrap *pattern* in a named capture group when *name* is truthy,
    otherwise return it unchanged."""
    if not name:
        return pattern
    return f"(?P<{name}>{pattern})"


def add_anonym_group(pattern):
    """Wrap *pattern* in a non-capturing group."""
    return "(?:" + pattern + ")"


def separator_pattern(s, separator):
    """Build a regex that matches *s* as runs of non-separator text, each
    terminated by a separator, plus a trailing non-separator run when *s*
    does not end with a separator.
    """
    seps = iterable2escape(separator)

    # A single whitespace separator may repeat, so allow "+" after it; for any
    # other separator, or several separators given at once, match exactly one.
    more = "+" if len(separator) == 1 and separator in CH_WHITESPACE else ""

    unit = f"[^{seps}]*[{seps}]{more}"
    occurrences = re.findall(f"({unit})", s)

    if len(occurrences) > 1:
        pattern = f"(?:{unit}){{{len(occurrences)}}}"
    elif len(occurrences) == 1:
        pattern = unit
    else:
        pattern = ""

    # Trailing text after the last separator.
    if s[-1] not in separator:
        pattern += f"[^{seps}]+"

    return pattern


def create_pattern_with_quotes(s):
    """Split *s* around quoted substrings and build a regex for the quoted parts.

    :param s: input string
    :return: (groups, pattern) where `groups` is a list of dicts with keys
        field/pos/size/quote/pattern describing each segment of *s*, and
        `pattern` is the concatenated regex covering everything through the
        last quoted pair. A trailing unquoted segment gets an empty pattern
        and is handled by the caller.
    """
    pattern = ""
    groups = []
    # Alternatives for half-width, curly and full-width quote pairs.
    quote_pattern = "\"[^\"]*?\"|'[^\']*?'|‘[^’]*?’|“[^”]*?”"
    # FIX: these alternatives were appended without a leading "|", fusing them
    # onto the "“…”" alternative and breaking both full-width quote forms.
    quote_pattern += "|＂[^＂]*?＂|＇[^＇]*?＇"
    pg = None

    # Extract every quoted pair.
    for cg in re.finditer(quote_pattern, s):
        # Opening and closing quote of this pair.
        quote_l = cg.group()[0]
        quote_r = cg.group()[-1]
        if pg:  # second and later pairs
            if pg.end() != cg.start():  # unquoted text between two pairs
                group_pattern = f"[^{quote_l}]+"
                groups.append(dict(field=s[pg.end():cg.start()],
                                   pos=pg.end(), size=cg.start() - pg.end(),
                                   quote=False, pattern=group_pattern))
                pattern += group_pattern
        else:  # first pair
            if cg.start():  # text before the first pair: any non-quote run
                group_pattern = f"[^{quote_l}]+"
                groups.append(dict(field=s[:cg.start()],
                                   pos=0, size=cg.start(),
                                   quote=False, pattern=group_pattern))
                pattern += group_pattern

        # Full regex for this pair, including both quote characters.
        group_pattern = f"{quote_l}[^{quote_r}]*?{quote_r}"

        groups.append(dict(field=cg.group(),
                           pos=cg.start(), size=cg.end() - cg.start(),
                           quote=True, pattern=group_pattern))

        # Concatenate the per-segment patterns into the overall pattern.
        pattern += group_pattern
        pg = cg

    # Trailing segment not enclosed in quotes: no pattern is generated for it.
    if groups:
        end = groups[-1]["pos"] + groups[-1]["size"]
        if end < len(s):  # end == len(s) means the string ends with a quote pair
            groups.append(dict(field=s[end:],
                               pos=end, size=len(s) - end,
                               quote=False, pattern=""))
    else:
        # No quote pairs at all: the returned pattern is the empty string.
        groups.append(dict(field=s,
                           pos=0, size=len(s),
                           quote=False, pattern=""))

    return groups, pattern


def string_isalpha(s):
    """Return a match object when *s* is one or more ASCII letters, else None."""
    return re.fullmatch(r"[A-Za-z]+", s)


def string_chinese(s):
    """True when every character of *s* lies in the CJK unified ideograph
    range U+4E00-U+9FA5 (vacuously true for an empty string)."""
    for ch in s:
        if not (0x4e00 <= ord(ch) <= 0x9fa5):
            return False
    return True


def create_string_pattern(string, neighber="", create_prefix=True):
    """Build a regex that matches *string* by picking a suitable separator.

    :param string: the text to build a pattern for
    :param neighber: the adjacent text (the value when building a prefix
        pattern, the suffix when building a value pattern); name kept as-is
        (sic) for interface compatibility
    :param create_prefix: True when building the pattern for a field's prefix,
        False when building the pattern for the value itself
    """
    if not string:
        return ""

    # Check for paired quotes. When present, `groups` carries their positions
    # and `pattern` is the regex for those pairs.
    # Example raw string: wan1" sessionid=0
    #   string=wan1, neighber=" sessionid=0, create_prefix=False
    # Without quote pairs this returns:
    #   ([{'field': 'wan1', 'pos': 0, 'size': 4, 'quote': False, 'pattern': ''}], '')
    groups, pattern = create_pattern_with_quotes(string)
    if groups[-1]['quote']:
        return pattern

    string = groups[-1]['field']
    if create_prefix:  # prefix pattern: prefer the prefix's last char as separator
        separator = string[-1]
    else:  # value pattern: prefer the suffix's first char as separator
        separator = neighber[0]

    if separator in CH_SYMBOL and separator != "_":
        pattern += separator_pattern(string, separator=separator)
    else:
        # e.g. s="A=B C=D E=FGHIJK" with the 4 characters "HIJK" selected
        if create_prefix:
            separator = neighber[0]
        else:
            separator = string[-1]
        if separator in CH_SYMBOL and separator != "_":
            # Note: this separator crosses the adjacency boundary.
            pattern += separator_pattern(string, separator=separator)
        else:  # neither adjacent char is a symbol; look for one inside string
            separator = separator_find(string)
            if separator:
                # Collect the tail of the string up to the last internal symbol.
                s = ""
                for c in string[::-1]:
                    if c in CH_SYMBOL and c != "_":
                        separator = c
                        break
                    s = c + s

                string = string[:-len(s)]
                if string:
                    pattern += separator_pattern(string, separator)
                # FIX: raw f-string; "\s" in a plain f-string is an invalid
                # escape sequence (SyntaxWarning on Python 3.12+).
                pattern += rf"[\s\S]{{{len(s)}}}"
            else:
                pattern += rf"[\s\S]{{{len(string)}}}"
    return pattern


def create_prefix_pattern(prefix, value):
    """Build the regex for the text that precedes a field's value."""
    return create_string_pattern(prefix, neighber=value)


def create_value_pattern(name, value, suffix, neighbor_next):
    """Build the (optionally named) regex group that matches *value*.

    :param name: capture-group name; falsy means no named group is added
    :param value: the exact text of the field value
    :param suffix: text between this value and the next field; None means the
        value runs to the end of the line, "" means the fields are adjacent
    :param neighbor_next: first character of the next field ('' when unknown)
    """
    if not suffix:
        if suffix is None:  # match through to the end of the line
            pattern = r"[\s\S]+$"
        else:  # two consecutive fields with no separator between them
            # NOTE(review): an empty neighbor_next also satisfies
            # `'' in CH_SYMBOL` (empty string is a substring of any string);
            # presumably callers never pass suffix == "" together with
            # neighbor_next == "" — verify at call sites.
            if neighbor_next in CH_SYMBOL:
                # The adjacent character is a symbol.
                separator = iterable2escape(neighbor_next)
                if neighbor_next in value:
                    count = value.count(neighbor_next)
                    pattern = f"([^{separator}]*{separator}){{{count}}}[^{separator}]*"
                else:
                    pattern = f"[^{separator}]+"
            elif value[-1] in CH_SYMBOL:
                # The value's own last character is a symbol.
                separator = iterable2escape(value[-1])
                count = value.count(value[-1])
                if count > 1:
                    pattern = f"([^{separator}]*{separator}){{{count}}}"
                else:
                    pattern = f"[^{separator}]*{separator}"
            else:
                pattern = r"[\s\S]+"
                if (string_chinese(neighbor_next) and string_chinese(value)) or \
                        (neighbor_next.isdecimal() and value.isdecimal()) or \
                        (string_isalpha(neighbor_next) and string_isalpha(value)):
                    # Value and neighbor are both Chinese, both digits or both
                    # letters: matching by character count is the only option.
                    # FIX: raw f-string ("\s" is an invalid escape otherwise).
                    pattern = rf"[\s\S]{{{len(value)}}}"
                elif string_chinese(neighbor_next):  # neighbor Chinese, value not
                    if not string_chinese(value):
                        pattern = "[^一-龥]+"
                elif not string_chinese(neighbor_next):  # neighbor not Chinese
                    if string_chinese(value):  # value is Chinese
                        pattern = "[一-龥]+"
                    elif not neighbor_next.isdecimal():  # neighbor non-digit, value all digits
                        if all(v.isdecimal() for v in value):
                            pattern = r"\d+"
                    elif neighbor_next.isdecimal():  # neighbor digit, value non-digit
                        if all(not v.isdecimal() for v in value):
                            pattern = r"[^\d]+"
    else:
        pattern = create_string_pattern(value, neighber=suffix, create_prefix=False)
        if suffix[0] not in CH_SYMBOL:
            if (string_chinese(suffix[0]) and string_chinese(value)) or \
                    (suffix[0].isdecimal() and value.isdecimal()) or \
                    (string_isalpha(suffix[0]) and string_isalpha(value)):
                # Value and suffix start are both Chinese, both digits or both
                # letters: match by character count.
                pattern = rf"[\s\S]{{{len(value)}}}"
            elif string_chinese(suffix[0]):  # suffix Chinese, value not
                if not string_chinese(value):
                    pattern = "[^一-龥]+"
            elif not string_chinese(suffix[0]):  # suffix not Chinese
                if string_chinese(value):
                    pattern = "[一-龥]+"
                elif not suffix[0].isdecimal():  # suffix non-digit, value all digits
                    if all(v.isdecimal() for v in value):
                        pattern = r"\d+"
                elif suffix[0].isdecimal():  # suffix digit, value non-digit
                    if all(not v.isdecimal() for v in value):
                        pattern = r"[^\d]+"
    return add_name_group(pattern, name)


def create_field_pattern(name, prefix="", value="", suffix="", strict=0, neighbor_next=''):
    """Concatenate the prefix pattern and the named value pattern of one field.

    NOTE(review): `strict` is currently unused; kept for interface compatibility.
    """
    return (create_prefix_pattern(prefix, value)
            + create_value_pattern(name, value, suffix, neighbor_next))


def create_enclose_pattern(enclose_l):
    """Build an alternation that matches, for every opening character in
    *enclose_l*, the shortest span up to its closing counterpart."""
    parts = []
    for opener in enclose_l:
        closer = iterable2escape(CH_ENCLOSE_MAP[opener])
        parts.append(f"{iterable2escape(opener)}.*?{closer}")
    return "|".join(parts)


def try_create_kv_pattern(s):
    """
    Scan a raw log line and, when possible, produce a KV-extraction regex.

    :param s: raw log line
    :return: the KV regex, or "" when no typical separators are found
    """
    # Typical field separators used as the initial candidate set.
    field_sp_init = '\t\n\r;, &|'

    # Include the full-width variants to support Chinese text.
    field_sp_init += SBC2DBC(field_sp_init)
    field_sp_init = "".join(set(field_sp_init))
    value_sp_init = ':= \t'
    value_sp_init += SBC2DBC(value_sp_init)
    value_sp_init = "".join(set(value_sp_init))

    # Strip leading/trailing whitespace: it is not part of any KV pair and
    # would otherwise be mistaken for a separator.
    # FIX: strip all whitespace characters in one call; the previous
    # per-character `strip(c)` loop left interleaved whitespace (such as
    # "\t \tfoo") partly unstripped.
    s = s.strip(CH_WHITESPACE)

    # The KV regex consists of four parts:
    # 1) the KEY pattern
    # 2) the KEY/VALUE separator pattern
    # 3) the VALUE pattern
    # 4) the KV-to-KV separator pattern

    # KEYs normally contain no separators or quotes, so the KEY pattern simply
    # matches the complement of those characters.
    all_sp_set = set(field_sp_init + value_sp_init + "'\"")
    all_sp_set.remove(" ")
    all_sp = ""
    for v in all_sp_set:
        all_sp += iterable2escape(v)

    # KEY pattern
    name_pattern = f"[^{all_sp}]*"
    # KEY/VALUE separator pattern
    value_sp_pattern = f'[{value_sp_init}]+'
    # VALUE pattern
    value_pattern = f'[^{field_sp_init}]*'
    # KV-to-KV separator pattern
    field_sp_pattern = f'[{field_sp_init}]+|$'
    # Concatenate the four parts into the KV regex.
    kv_pattern = f"({name_pattern})({value_sp_pattern})({value_pattern})({field_sp_pattern})"

    # If this regex finds nothing, the log uses atypical separators and the
    # user must supply separators or a regex manually.
    finds = re.findall(kv_pattern, s)
    if not finds:
        return ""

    # Detect quotes/brackets at the start of values.
    enclose_l = set()
    for f in finds:
        if f[2] and f[2][0] in CH_ENCLOSE_L:
            enclose_l.add(f[2][0])

    if enclose_l:
        # Values may be quoted/bracketed; extend the VALUE pattern accordingly
        # and re-run the extraction with the updated regex.
        enclose_pattern = create_enclose_pattern(enclose_l)
        value_pattern = f"{enclose_pattern}|{value_pattern}"
        kv_pattern = f"({name_pattern})({value_sp_pattern})({value_pattern})({field_sp_pattern})"
        finds = re.findall(kv_pattern, s)

    value_sp_set = set()  # KEY/VALUE separators actually matched
    field_sp_set = set()  # KV-to-KV separators actually matched
    not_word_set = set()  # special characters found inside KEYs
    # The first-pass regex is not always accurate, so refine the separators by
    # inspecting the matches. Idea: if the i-th KEY is not a conventional
    # identifier, the split went wrong, and the error must come from the
    # (i-1)-th match's field separator — drop that separator from the set.
    for i, value in enumerate(finds):
        name, value_sp, value, field_sp = value
        # A KEY starting with a digit, or not starting with a letter, digit,
        # underscore or Chinese character, is not a conventional identifier.
        if re.match(r'^\d.*', name) or not re.match(r'^[_a-zA-Z0-9一-龥].*', name):
            if i > 0:
                if finds[i - 1][3] in field_sp_set:
                    field_sp_set.remove(finds[i - 1][3])  # drop the bad KV-KV separator
                    continue
            else:
                continue
        if value_sp:
            value_sp_set.update(set(value_sp))  # collect KEY/VALUE separators
        if field_sp:
            field_sp_set.update(set(field_sp))  # collect KV-KV separators

        # Strip underscores, letters, digits and Chinese characters from the
        # KEY; whatever remains are special characters the KEY pattern must allow.
        not_word = re.sub("[_a-zA-Z0-9一-龥]+", "", name)
        not_word_set.update(set(not_word))

    # Rebuild the KEY pattern.
    if not_word_set:
        not_words = iterable2escape(not_word_set)
        # TODO: this KEY pattern also matches keys that *start* with a special
        # character, which is unusual; a safer (unverified) form would be:
        # [_a-zA-Z0-9一-龥][_a-zA-Z0-9一-龥{not_words}]+
        name_pattern = f"[_a-zA-Z0-9一-龥{not_words}]+"
    else:
        name_pattern = "[_a-zA-Z0-9一-龥]+"

    # When both separator sets contain the space and either set has more than
    # one candidate, drop the space from the larger set.
    common_sp = value_sp_set & field_sp_set
    if common_sp and " " in common_sp:
        c = " "
        if len(field_sp_set) > 1:
            field_sp_set.remove(c)
        elif len(value_sp_set) > 1:
            value_sp_set.remove(c)

    # Rebuild the remaining parts of the KV regex.
    value_sp = "".join(list(value_sp_set))
    field_sp = "".join(list(field_sp_set))
    value_sp_pattern = f'{value_sp}'
    if len(value_sp) > 1:
        value_sp_pattern = f'[{value_sp_pattern}]+'

    if len(field_sp) > 1:
        field_sp_pattern = f'[{field_sp}]+|$'
    else:
        field_sp_pattern = f'{field_sp}|$'

    value_pattern = f'[^{field_sp}]*'
    if enclose_l:
        # Regenerate with quote pairing taken into account.
        enclose_pattern = create_enclose_pattern(enclose_l)
        value_pattern = f"{enclose_pattern}|{value_pattern}"

    return f"({name_pattern}){value_sp_pattern}({value_pattern})(?:{field_sp_pattern})"


def create_kv_pattern(s, field_separator, value_separator, enclose=True):
    r"""
    Build a KV-extraction regex. For "K1=V1 K2=V2 K3=V3" with field separator
    space and KEY/VALUE separator '=':
    1) When no V contains a space or '=', the regex is ([^\s=]+)=([^\s]+)(?:\s+|$)
    2) A V containing an unquoted space is ambiguous and cannot be handled.
    3) A quoted V may itself contain spaces or '=', e.g.
       V2='Set value1=1000 and set value2=10'. Two alternatives are then used:
       a) quoted values match (\'.*?\')
       b) unquoted values match as in 1)
       giving: ([^\s=]+)=(\'.*?\')(?:\s+|$)|([^\s=]+)=([^\s]+)(?:\s+|$)

    :param s: sample log line, used to detect quoted/bracketed values
    :param field_separator: characters separating KV pairs
    :param value_separator: characters separating KEY from VALUE
    :param enclose: when True, detect and handle quoted/bracketed values
    :raises ValueError: when both separators are missing or they are identical
    """
    field_sp = ""
    value_sp = ""

    if field_separator:
        field_separator = escape(field_separator)
        field_sp = iterable2escape(field_separator)

    if value_separator:
        value_separator = escape(value_separator)
        value_sp = iterable2escape(value_separator)

    # With both separators given, split K and V using them.
    if field_separator and value_separator:
        if field_separator == value_separator:
            raise ValueError("字段分隔符和KEY-VALUE分隔符不能相同")

        pattern = f"(.+?)[{value_sp}]+([^{field_sp}]+)(?:[{field_sp}]+|$)"
        if not enclose:
            return pattern
        find = re.findall(pattern, s)
        enclose_l = set()
        for kv in find:
            value = kv[1]
            # Nested quotes/brackets are not handled; use prefix_pattern or
            # field_pattern for those cases.
            if value[0] in CH_ENCLOSE_L:
                enclose_l.add(value[0])

        if enclose_l:
            # e.g. p='([^\s=]+)=([^\'][^\s]+|.*?\')(?:\s+|$)'
            pattern = f"(.+?)[{value_sp}]+([^{iterable2escape(enclose_l)}][^{field_sp}]*"
            for el in enclose_l:
                er = CH_ENCLOSE_MAP[el]
                pattern += f"|{iterable2escape(el)}.*?{iterable2escape(er)}"
            pattern += f")(?:[{field_sp}]+|$)"

    elif value_separator:  # no KEYs, only values, e.g. space-separated values
        pattern = f"([^{value_sp}]+)(?:[{value_sp}]+|$)"
        if not enclose:
            return pattern
        find = re.findall(pattern, s)
        enclose_l = set()
        for value in find:
            if value[0] in CH_ENCLOSE_L:
                enclose_l.add(value[0])
        if enclose_l:
            # FIX: interpolate the escaped characters, not the set object —
            # f"[^{enclose_l}]" rendered the set's repr (e.g. {'"'}) into the
            # character class, as opposed to the branch above which correctly
            # uses iterable2escape(enclose_l).
            pattern = f"([^{iterable2escape(enclose_l)}][^{value_sp}]+"
            for el in enclose_l:
                er = CH_ENCLOSE_MAP[el]
                pattern += f"|{iterable2escape(el)}.*?{iterable2escape(er)}"
            pattern += f")(?:[{value_sp}]+|$)"
    else:
        raise ValueError("未指定分隔符")

    return pattern


def escape(value):
    """Replace the literal two-character escapes \\n, \\r, \\t and \\\\ in
    *value* with the real newline, carriage return, tab and backslash."""
    replacements = {'\\n': '\n', '\\r': '\r', '\\t': '\t', '\\\\': '\\'}

    def repl(matchobj):
        token = matchobj.group(0)
        return replacements.get(token, token)

    return re.sub(r'\\\\|\\n|\\r|\\t', repl, value)


def try_extract_key_value_separator(s):
    """Guess the KV-KV separator and the KEY/VALUE separator of log line *s*.

    Symbols are counted, ranked by frequency (ties keep first-seen order, as
    sorted() is stable), and the highest-ranked character found in each
    candidate set wins. Returns ("", "") parts when nothing matches.
    """
    counts = defaultdict(int)
    # Count the symbol characters.
    for ch in s:
        if ch in CH_SYMBOL:
            counts[ch] += 1

    ranked = sorted(counts, key=counts.get, reverse=True)

    field_candidates = " |,;\t + " + SBC2DBC(" |,;\t + ")
    value_candidates = ":=" + SBC2DBC(":=")

    # Most frequent symbol that looks like a field separator.
    field_separator = next((c for c in ranked if c in field_candidates), "")
    # Most frequent symbol that looks like a KEY/VALUE separator.
    value_separator = next((c for c in ranked if c in value_candidates), "")

    return field_separator, value_separator


def try_extract_value_separator(s):
    """Guess the separator between values in *s*; falls back to a space."""
    counts = defaultdict(int)
    # Count the symbol characters.
    for ch in s:
        if ch in CH_SYMBOL:
            counts[ch] += 1

    ranked = sorted(counts, key=counts.get, reverse=True)
    candidates = " |,;\t" + SBC2DBC(" |,;\t")
    # Most frequent symbol that looks like a value separator.
    return next((c for c in ranked if c in candidates), " ")


def create_patterns(s, fields):
    """Build the concatenated regex for the selected *fields* of raw line *s*.

    :param s: the raw log line
    :param fields: mapping of field name -> config dict with 'pos' and 'size';
        each entry gains 'pattern' and 'value' keys as a side effect
    :return: the concatenation of every field's pattern

    NOTE(review): `fields[i + 1]` looks the next entry up by the loop counter,
    which only works when the mapping's keys are the integers 0..len-1 —
    confirm against the callers.
    """
    at = 0
    patterns = ''

    for i, (name, f) in enumerate(fields.items()):

        pos, size = f['pos'], f['size']
        prefix = s[at: pos]
        value = s[pos: pos + size]  # the exact text of this field

        neighbor_next = ''
        if (i + 1) < len(fields):
            config_next = fields[i + 1]
            # Characters between field[i] and field[i+1] that were not selected.
            suffix = s[pos + size: config_next['pos']]
            neighbor_next = s[config_next['pos']]  # first char of the next field
        else:
            suffix = s[pos + size:]
            if not suffix:
                suffix = None  # signals "match to end of line" downstream

        p = create_field_pattern(name, prefix=prefix, value=value, suffix=suffix, strict=0,
                                 neighbor_next=neighbor_next)
        f['pattern'] = p
        f['value'] = value
        patterns += p

        at = pos + size
        # (dead `i += 1` removed: enumerate() already advances the counter)

    return patterns
