# -*- coding: utf-8 -*-
# created on 2016/8/26

import itertools
import re
from copy import deepcopy
from mathsolver.functions.base import getobj, BaseValue
from mathsolver.functions.base.objects import BaseRootFunc


def extract(token):
    """Split a rule string into its template and its object types.

    Every run of two or more identifier characters is replaced by the
    placeholder "obj" in the template; the runs themselves (digits
    stripped, bare "obj" mapped to "value") are resolved via getobj().

    :param token: formal-language rule string, e.g. "IS(func1, obj2)"
    :return: (template string, list of resolved objects)
    """
    pattern = "[0-9A-Za-zS_]{2,}"
    names = re.findall(pattern, token)
    template = re.sub(pattern, "obj", token)

    resolved = []
    for name in names:
        stripped = re.sub("[0-9]+", "", name)
        if stripped == "obj":
            stripped = "value"
        resolved.append(getobj(stripped))
    return template, resolved


def exact_match(item1, item2):
    """Return True when both (template, objects) pairs agree exactly.

    :param item1: (candidate template, candidate objects)
    :param item2: (rule template, rule objects)
    """
    cand, cand_objs = item1
    token, token_objs = item2
    if token != cand:
        return False
    if len(token_objs) != len(cand_objs):
        return False
    return all(a == b for a, b in zip(token_objs, cand_objs))


def _blur_match(obj1, obj2):
    """Fuzzy comparison of two rule objects.

    True when obj2 is the BaseValue wildcard, the objects are equal, or
    (for classes) obj2 specializes obj1 per a few domain-specific pairs.
    Falls through (implicitly None, i.e. falsy) when the subclass test
    raises or neither branch decides — preserved for truthiness callers.
    """
    if obj2 == BaseValue:
        return True
    if obj1 == obj2:
        return True

    if isinstance(obj1, str) or isinstance(obj2, str):
        # Strings only match by equality (already ruled out above).
        return obj1 == obj2

    try:
        if issubclass(obj2, obj1):
            return True
        # Root functions also accept equations and curves.
        if issubclass(obj1, BaseRootFunc) and obj2.__name__ in ["BaseEq", "BaseCurve"]:
            return True
        # An intersection may stand in for a point.
        return obj1.__name__ == "BaseInter" and obj2.__name__ == "BasePoint"
    except Exception:
        pass


def blur_match(item1, item2):
    """Return True when the pairs agree on template and, objectwise, on the
    fuzzy comparison implemented by _blur_match."""
    cand, cand_objs = item1
    token, token_objs = item2
    if token != cand or len(token_objs) != len(cand_objs):
        return False
    for a, b in zip(token_objs, cand_objs):
        if not _blur_match(a, b):
            return False
    return True


def check(cand, items):
    """Return True when `cand` matches any rule in `items`.

    :param cand: raw candidate string (run through extract() first)
    :param items: iterable of (template, objects) rule pairs
    """
    try:
        clean_cand, cand_objs = extract(cand)
    except Exception:
        # Unparseable candidates simply do not match.
        return False

    candidate = [clean_cand, cand_objs]
    for token, token_objs in items:
        # Cheap pre-filter before the detailed object comparison.
        if token != clean_cand or len(token_objs) != len(cand_objs):
            continue
        rule = [token, token_objs]
        if exact_match(candidate, rule) or blur_match(candidate, rule):
            return True
    return False


def get_sub_token(parse, name):
    """Find and detach every `name` subtree in a nested parse structure.

    Each dict carrying `name` as a key has that entry replaced in place by
    the placeholder ["a", "b"]; the detached values are returned.

    :param parse: nested dict/list parse tree (leaves are strings)
    :param name: key to search for, e.g. "AND"
    :return: list of detached subtree values (possibly empty)
    """
    # Only look the key up on dicts: the previous version ran `name in parse`
    # on every node, so a leaf string containing the name (e.g. "AND(a,b)")
    # or a list containing it crashed on the item assignment below.
    if isinstance(parse, dict):
        if name in parse:
            token = parse[name]
            parse[name] = ["a", "b"]  # placeholder keeps the tree well-formed
            return [token]
        cands = []
        for value in parse.values():
            cands += get_sub_token(value, name)
        return cands
    if isinstance(parse, list):
        cands = []
        for value in parse:
            cands += get_sub_token(value, name)
        return cands
    # Leaf (string or other scalar): nothing to extract.
    return []


class Classifier(object):
    """
    Decide whether each parsed sentence is a condition or the question of a
    problem, and match it to the formal language (rule base).
    """

    def __init__(self, all_conditions, all_questions):
        # Pre-compute (template, object-types) pairs for every known rule so
        # that check() can compare candidate strings against them directly.
        self.all_conditions = [extract(token) for token in all_conditions]
        self.all_questions = [extract(token) for token in all_questions]
        self.temp_tokens = []

    def deep_search(self, token):
        """Try to match the two operands of a single AND(...) node separately.

        :param token: parse result {"main": ..., "text": ...}
        :return: list of matched sub-tokens, or None when the parse does not
                 contain exactly one two-element AND node
        """
        token = deepcopy(token)
        tokens = []
        if "AND" in str(token):
            ands = get_sub_token(token["main"], "AND")
            # Only the simple case is handled: one AND with two operands.
            if len(ands) != 1 or len(ands[0]) != 2:
                return

            first, second = ands[0]
            # Distribute a shared OF(...) over the AND so each operand
            # becomes a self-contained phrase.
            if "OF" in first and "noun" in second:
                second = {"OF": [first["OF"][0], second]}

            if "noun" in first and "OF" in second:
                first = {"OF": [first, second["OF"][1]]}

            ands = [first, second]
            is_pass = [False, False]

            # Deduplicate candidates while keeping their original order.
            cands = flatten.flatten(token["main"])
            cands = sorted(set(cands), key=lambda x: cands.index(x))
            for cand in cands:
                for i, value in enumerate(is_pass):
                    arg = ands[i]
                    if not value:
                        for item in flatten.flatten(arg):
                            # Substitute one operand back into the AND slot.
                            temp = cand.replace("AND(a,b)", item)
                            cleaned_temp = clean_query(temp)
                            if check(cleaned_temp, self.all_conditions):
                                tokens.append({"text": token["text"], "condition": temp, "main": {}})
                                is_pass[i] = True
                                break

                            if check(cleaned_temp, self.all_questions):
                                tokens.append({"text": token["text"], "question": temp, "main": {}})
                                is_pass[i] = True
                                break
            return tokens
        # return tokens

    def recursive_search(self, token, prefix=""):
        """Flatten the parse with DeepFlatten (which rewrites already-matched
        sub-expressions into intermediate obj<N> placeholders), then try to
        match the remaining candidates as a condition or a question.

        :param token: parse result {"main": ..., "text": ...}
        :param prefix: action prefix (e.g. "TOF:") prepended to candidates
        :return: [token] when a match was found, otherwise None
        """
        deepflatten = DeepFlatten(self.all_conditions, self.all_questions)
        cands = deepflatten.flatten(token["main"])
        # When everything collapsed into a single obj<N> placeholder (and no
        # prefix is requested), fall back to the left-hand side of the last
        # recorded intermediate query instead.
        # NOTE(review): temp_tokens[-1] is read before the emptiness check
        # below — presumably flattening to obj<N> implies at least one
        # recorded query; confirm.
        if len(cands) == 1 and re.match("obj[0-9]+$", cands[0]) and not prefix:
            cands = [deepflatten.temp_tokens[-1]["condition"].split("=")[0]]
            deepflatten.temp_tokens = deepflatten.temp_tokens[:-1]

        if not deepflatten.temp_tokens:
            return

        for i, cand in enumerate(cands):
            cand = prefix + cand
            cleaned_cand = clean_query(cand)
            # Highest obj<N> index referenced by this candidate; the
            # intermediate queries up to that index are kept in the chain.
            num = [int(n) for n in re.findall("obj([0-9]+)", cand)]

            if num:
                num = max(num)
            else:
                continue

            if "condition" not in token and check(cleaned_cand, self.all_conditions):
                token["condition"] = ";".join([t["condition"] for t in deepflatten.temp_tokens if int(re.findall("obj([0-9]+)$", t["condition"])[0]) <= num] + [cand])

            if "question" not in token and check(cleaned_cand, self.all_questions):
                # "IS(obj,)" adds no information beyond the intermediate
                # queries, so it is dropped from the chain.
                if cleaned_cand != "IS(obj,)":
                    token["question"] = ";".join([t["condition"] for t in deepflatten.temp_tokens if int(re.findall("obj([0-9]+)$", t["condition"])[0]) <= num] + [cand])
                else:
                    token["question"] = ";".join([t["condition"] for t in deepflatten.temp_tokens if int(re.findall("obj([0-9]+)$", t["condition"])[0]) <= num])
                break

        if "condition" not in token and "question" not in token and prefix:
            # No direct match: wrap a matched sub-query into a fresh obj<N>
            # placeholder and attach the prefix to that placeholder.
            condition, question = "", ""
            for i, cand in enumerate(cands):
                cleaned_cand = clean_query(cand)
                if "condition" not in token and check(cleaned_cand, self.all_conditions):
                    condition = ";".join([t["condition"] for t in deepflatten.temp_tokens] + [cand])

                if "question" not in token and check(cleaned_cand, self.all_questions):
                    if cleaned_cand != "IS(obj,)":
                        question = ";".join([t["condition"] for t in deepflatten.temp_tokens] + [cand])
                    else:
                        question = ";".join([t["condition"] for t in deepflatten.temp_tokens])
                    break

            deepflatten.obj_index += 1
            obj = "obj%d" % deepflatten.obj_index
            # "MT" actions mark the wrapped result as a condition; otherwise
            # it is treated as a question.
            if "MT" in prefix:
                if condition:
                    query = "%s=%s" % (condition, obj)
                    token["condition"] = "%s;%s%s" % (query, prefix, obj)
                elif question:
                    query = "%s=%s" % (question, obj)
                    token["condition"] = "%s;%s%s" % (query, prefix, obj)
            else:
                if condition:
                    query = "%s=%s" % (condition, obj)
                    token["question"] = "%s;%s%s" % (query, prefix, obj)
                elif question:
                    query = "%s=%s" % (question, obj)
                    token["question"] = "%s;%s%s" % (query, prefix, obj)

        if "condition" in token or "question" in token:
            return [token]

    def search(self, token, prefix=""):
        """
        Search the rule base for a matching rule.

        :param token: parse result {"main": ..., "text": ...}
        :param prefix: action prefix prepended to every candidate
        :return: updated parse result
                 {"main": ..., "text": ..., "condition": ..., "question": ..., "optionQ": ...}
        """
        cands = flatten.flatten(token["text"]) + flatten.flatten(token["main"])
        actions = token["actions"] if "actions" in token else ""
        prefix = actions + prefix
        for cand in cands:
            cleaned_cand = prefix + clean_query(cand)
            if "condition" not in token and check(cleaned_cand, self.all_conditions):
                token["condition"] = prefix + cand

            if "question" not in token and check(cleaned_cand, self.all_questions):
                token["question"] = prefix + cand

        if not ("condition" in token or "question" in token):
            if prefix:
                # Retry without the prefix and wrap the match into a fixed
                # obj42 placeholder.
                for cand in cands:
                    cleaned_cand = clean_query(cand)
                    if ("condition" not in token and check(cleaned_cand, self.all_conditions)) or (
                            "question" not in token and check(cleaned_cand, self.all_questions)):
                        if "MT" in prefix:
                            token["condition"] = "%s=obj42;%sobj42" % (cand, prefix)
                        else:
                            token["question"] = "%s=obj42;%sobj42" % (cand, prefix)
                        return [token]

            tokens = self.deep_search(token)
            if tokens:
                return tokens

            tokens = self.recursive_search(token, prefix)
            if tokens:
                return tokens
            else:
                # Pure TP/PP tokens without latin identifiers carry no usable
                # content and are dropped.
                if len(token["main"]) == 1 and ("PP" in token["main"] or "TP" in token["main"]):
                    if not re.search("[a-z]", cands[1]):
                        return []

        return [token]

    def classify(self, tokens):
        """Run search() over every token; resets the shared subject state
        (flatten.S) first. Returns the concatenated results."""
        flatten.S = None
        new_tokens = []
        for token in tokens:
            new_tokens += self.search(token)

        return new_tokens

    def replace(self, token, subject):
        """Recursively substitute every list leaf containing "pron" in the
        parse tree with `subject` (mutates `token` in place)."""
        if isinstance(token, dict):
            for key in token.keys():
                if isinstance(token[key], list) and "pron" in token[key]:
                    if isinstance(subject, dict):
                        token.pop(key)
                        token.update(subject)
                    else:
                        token[key] = subject
                else:
                    self.replace(token[key], subject)
        elif isinstance(token, list):
            for i in range(len(token)):
                if isinstance(token[i], list) and "pron" in token[i]:
                    token[i] = subject
                else:
                    self.replace(token[i], subject)

    @staticmethod
    def preprocess(tokens):
        """Merge leading Triangle/Quadra context into the subsequent tokens
        and propagate the sentence subject into elliptical "... IS what"
        tokens. Returns a new token list."""
        merge_flag1 = False
        merge_flag2 = False
        pp = None
        merged_tokens = []

        for i, cond in enumerate(tokens):
            if merge_flag1:
                merged_tokens.append(dict(cond))
                merged_tokens[-1]["main"].update(pp["main"])
                merged_tokens[-1]["text"] += pp["text"]
            elif merge_flag2:
                merged_tokens[-1]["main"].update(cond["main"])
                merged_tokens[-1]["text"] += cond["text"]
                merge_flag2 = False
            else:
                merged_tokens.append(dict(cond))

            if i == 0 and ("Triangle" in cond["text"] or ("三角形" in cond["text"] and "Ineqs" not in cond["text"])):
                # For triangle problems the triangle condition is merged into
                # every following condition. Example input (Chinese):
                # 在直角△ABC中,A是△ABC的直角顶点,G是△ABC的重心,|\\overrightarrow{AB}|=2,|\\overrightarrow{AC}|=1,则\\overrightarrow{AG}•\\overrightarrow{BC}=
                merge_flag1 = True
                pp = {"main": {"PP": {"noun": ["Triangle1"]}}, "text": cond["text"]}
            elif i == 0 and ("Quadra" in cond["text"]):
                merge_flag1 = True
                pp = {"main": {"PP": {"noun": ["Quadra"]}}, "text": cond["text"]}
            elif len(cond["main"]) == 1 and ("TP" in cond["main"] or "PP" in cond["main"]) and i < len(tokens) - 2:
                # merge_flag2 = True
                merge_flag2 = False  # close merge

        tokens = merged_tokens
        subject = ""
        for token in tokens:
            if isinstance(token["main"], dict):
                # Carry the subject into elliptical clauses such as
                # "the absolute value of poly is ...", "its opposite is ...".
                if subject:
                    if token["main"].get("V") == "IS" and (
                            not token["main"]["O"] or token["main"]["O"] == {'noun': ['what']}):
                        if "AND" in token["main"]["S"] and len(token["main"]["S"]["AND"]) == 1:
                            token["main"]["S"] = {"AND": [subject, token["main"]["S"]["AND"][0]]}
                        else:
                            token["main"]["S"] = {"OF": [subject, token["main"]["S"]]}

                elif token["main"].get("V") == "IS" and (
                        not token["main"]["O"] or token["main"]["O"] == {'noun': ['what']}):
                    if "OF" in token["main"]["S"]:
                        subject = token["main"]["S"]["OF"][0]
                    elif "AND" in token["main"]["S"]:
                        subject = token["main"]["S"]["AND"][0]
            else:
                subject = ""

        return tokens

    @staticmethod
    def error_report(token):
        """Raise a ValueError describing the token that no rule matched."""
        mains = flatten.flatten(token["main"])
        text = token["text"]  # re.findall("\((.*?)-[a-z0-9]*\)", token["text"])
        text = "Rule of %s not found. " % "".join(text)
        cands = "Try to write a rule to match following token: " + "; ".join([clean_query(main) for main in mains])
        raise ValueError(text + cands)

    @staticmethod
    def rule_report(token):
        """Print the candidate rules generated for a token (debug aid)."""
        mains = flatten.flatten(token["main"])
        mains = list(set(mains))
        text = token["text"]  # re.findall("\((.*?)-[a-z0-9]*\)", token["text"])
        text = "Token:  %s. " % "".join(text)
        cands = "Rule: " + "; ".join([clean_query(main) for main in mains])
        print(text + cands)

    def simple_label(self, tokens):
        """Split classified tokens into (conditions, questions).

        :raises ValueError: via error_report() for a token with neither label
        """
        conditions = []
        questions = []
        for token in tokens:
            try:
                self.rule_report(token)
            except Exception as e:
                print(e)
            if "condition" in token:
                token["query"] = token["condition"]
                conditions.append(token)
            elif "question" in token:
                token["query"] = token["question"]
                questions.append(token)
            else:
                self.error_report(token)

        return conditions, questions

    def run_option(self, token, options):
        """Build per-option question tokens for multiple-choice problems.

        :param token: the question-stem token
        :param options: list of option token lists; the last element of each
                        is the option's own parse
        :return: one {"conditions": ..., "questions": ...} dict per option
                 (empty dict when no rule matched)
        """
        text = token["text"]
        parse = token["main"]
        option_qs = []
        for option in options:
            option_q = {}
            if option:
                sub_conditions, sub_questions = self.simple_label(self.classify(option[:-1]))
                assert len(sub_questions) == 0

                # Combine the stem parse with the option parse, depending on
                # the shape of the stem ("正确"/"错误" = correct/incorrect).
                if "V" in parse and "IS" == parse["V"] and "VP" in parse["S"]:
                    new_parse = {"V": parse["S"]["VP"]["verb"], "S": option[-1]["main"]["S"],
                                 "O": {"noun": parse["S"]["VP"]["noun"]}}
                elif "正确" in text or "错误" in text:
                    new_parse = option[-1]["main"]
                elif parse == {"S": "", "V": "", "O": ""} or parse == {'S': {'none': ['what']}, 'O': '',
                                                                       'V': ''} or \
                        parse == {"S": {"OF": [{"noun": ["variables2"]}, {"noun": ["大小", "关系"]}]},
                                  "O": {"noun": ["what"]}, "V": "IS"}:
                    new_parse = option[-1]["main"]
                elif "question" not in token:
                    if "V" in parse and parse["V"]:
                        new_parse = {"V": parse["V"], "S": parse["S"], "O": option[-1]["main"]["S"]}
                    else:
                        new_parse = option[-1]["main"]
                else:
                    option_qs.append({})
                    continue

                token["main"] = new_parse
                cands = flatten.flatten(new_parse)

                option_token = {"main": new_parse, "text": text + option[-1]["text"], }
                # "TOF:" marks truth-of-statement style option questions.
                for cand in cands:
                    cand = "TOF:" + cand
                    cleaned_cand = clean_query(cand)
                    if check(cleaned_cand, self.all_questions):
                        option_token["query"] = cand
                        break

                if not ("condition" in option_token or "question" in option_token):
                    tokens = self.recursive_search(token, "TOF:")
                    if tokens:
                        # NOTE(review): this loop rebinds `token`, shadowing
                        # the method parameter for the rest of the outer
                        # iteration — confirm intended.
                        for token in tokens[:-1]:
                            token["query"] = token["condition"]
                            sub_conditions.append(token)

                        option_token["query"] = tokens[-1]["question"]

                if "query" in option_token or "question" in option_token:
                    option_q = {"conditions": sub_conditions, "questions": [option_token]}
            option_qs.append(option_q)
        return option_qs

    def process(self, tokens):
        """Classify all tokens; the last token is preferred as the question.

        :return: (conditions, questions)
        :raises ValueError: when the final token matches no rule
        """
        tokens = self.preprocess(tokens)
        tokens = self.classify(tokens)

        conditions, questions = self.simple_label(tokens[:-1])
        token = tokens[-1]
        # self.rule_report(token)
        if "question" in token:
            token["query"] = token["question"]
            questions.append(token)
        elif "condition" in token:
            token["query"] = token["condition"]
            conditions.append(token)
        else:
            self.error_report(token)

        return conditions, questions

    def process_options(self, tokens):
        """Classify the tokens of a multiple-choice problem; the last token
        is searched as a "TOF:" (truth-of) question, falling back to the
        literal template query "TOF:模板" when no rule matches.

        :return: (conditions, [question token])
        """
        tokens = self.preprocess(tokens)
        conditions, questions = self.simple_label(self.classify(tokens[:-1]))
        assert len(questions) == 0
        self.search(tokens[-1], prefix="TOF:")
        if "question" in tokens[-1]:
            tokens[-1]["query"] = tokens[-1]["question"]
        else:
            tokens[-1]["query"] = "TOF:模板"
        return conditions, [tokens[-1]]


def clean_query(text):
    """Normalize a candidate query string for rule matching.

    Strips all digits and spaces; a bare "<name>andequal" candidate is
    rewritten to "OF(<name>,值)" (i.e. the value of <name>).

    :param text: raw candidate string
    :return: normalized query string
    """
    query = re.sub("[0-9]+", "", text.strip()).replace(" ", "")
    if "andequal" in query:
        query = query.replace("andequal", "")
        # Only a pure identifier becomes an OF(...,值) value query.
        if query.isalpha():
            query = "OF(%s,值)" % query

    # Spaces are already removed above and no later step can introduce one,
    # so the former trailing .replace(" ", "") was redundant.
    return query


class Flatten(object):
    """Enumerate candidate formal-language strings from a parse tree.

    Class-level state shared across calls:
      PP -- flattened candidates of the most recent "PP" subtree
      S  -- last seen subject entity id (e.g. "func1"), used to resolve
            pronouns and empty subjects
    """
    PP = None
    S = None

    def flat_s(self, tree):
        """Flatten a clause tree {"S": ..., "V": ..., "O": ...} (optionally
        with "PP"/"TP") into "V(S, O)"-style candidate strings."""
        if "PP" in tree:
            self.PP = self.flatten(tree["PP"])
        else:
            self.PP = None
        ss = self.flatten(tree["S"])
        # Remember the subject entity id for later pronoun resolution.
        # NOTE(review): "zhixian.txt[0-9]+" looks like a file name pasted
        # into the pattern ("." is also an unescaped wildcard) — confirm.
        if re.search("(func[0-9]+)", str(tree["S"])):
            self.S = re.search("(func[0-9]+)", str(tree["S"])).group(0)
        elif re.search("(zhixian.txt[0-9]+)", str(tree["S"])):
            self.S = re.search("(zhixian.txt[0-9]+)", str(tree["S"])).group(0)
        elif re.search("(eq[0-9]+)", str(tree["S"])):
            self.S = re.search("(eq[0-9]+)", str(tree["S"])).group(0)
        elif re.search("(Sequence[0-9]+)", str(tree["S"])):
            # Coreference resolution. Example input (Chinese): 已知等差数列{a_{n}},a_{1}+a_{2}+a_{3}=-24,a_{18}+a_{19}+a_{20}=78,则此数列前20项和等于().
            self.S = re.search("(Sequence[0-9]+)", str(tree["S"])).group(0)

        main = []
        if tree["V"]:
            vv = self.flatten(tree["V"])
            oo = self.flatten(tree["O"])
            for s, v, o in itertools.product(ss, vv, oo):
                main.append("%s(%s, %s)" % (v, s, o))
                # "IS"/"等于" (equals) is symmetric: also emit the swapped
                # form, and the bare subject for empty objects.
                if v == "IS" or v == "等于":
                    main.append("IS(%s, %s)" % (o, s))
                    if ("(" in s or v == "等于") and o == "":
                        main.append(s)
                elif not re.search("[a-z]", ",".join(ss)):
                    main.append("%s(%s, %s)" % (v, "", o))

                if not s:
                    # Empty subject: fall back to the remembered subject.
                    main.append("%s(%s, %s)" % (v, self.S, o))
        else:
            main += ss

        if "TP" in tree:
            tp = self.flatten(tree["TP"])
            cands = ["TP(%s),%s" % (tp, m) for tp, m in itertools.product(tp, main)]
            # TP phrases without latin identifiers may also be dropped.
            cands += [m for tp, m in itertools.product(tp, main) if re.search("[a-z]", tp) is None]
            return cands
        elif self.PP:
            pp = self.PP
            cands = ["PP(%s),%s" % (pp, m) for pp, m in itertools.product(pp, main)]
            cands += [m for pp, m in itertools.product(pp, main) if re.search("[a-z]", pp) is None]
            return cands
        else:
            return main

    def flat_tp(self, tree):
        """Flatten a TP node, optionally combined with a leading PP."""
        if "PP" in tree:
            pp = self.flatten(tree["PP"])
        else:
            pp = None

        cand = self.flatten(tree["TP"])  # ["TP(%s)" % token for token in flatten(tree["TP"])]
        if pp:
            return ["PP(%s),%s" % (pp, m) for pp, m in itertools.product(pp, cand)] + cand
        else:
            return cand

    def flat_number(self, tree):
        """Flatten {"number": ..., "noun": ...} into "[number,noun]"
        candidates plus the bare noun candidates."""
        number = self.flatten(tree["number"])
        none = self.flatten(tree["noun"])
        return ["[%s,%s]" % item for item in itertools.product(number, none)] + none

    def flat_noun(self, tree):
        """Flatten a noun list into bracketed combinations of its entries.

        Candidates containing latin letters (entity ids) are ranked before
        purely symbolic ones; a "pron" entry adds OF(subject, ...) variants
        using the remembered subject self.S.
        """
        candidates1 = []
        candidates2 = []
        if isinstance(tree["noun"], str):
            tree["noun"] = [tree["noun"]]
        # "what" is the question placeholder and never part of a candidate.
        nones = [noun for noun in list(tree["noun"]) if noun != "what"]
        if not nones:
            nones = [""]

        if len(nones) > 1:
            candidates1.append("[%s]" % ",".join(nones))
            for i in range(len(nones), 0, -1):
                for cand in itertools.combinations(nones, i):
                    if len(cand) > 1:
                        none = "[%s]" % ",".join(cand)
                    else:
                        none = cand[0]

                    if re.search("[a-z]", none):
                        candidates1.append(none)
                    else:
                        candidates2.append(none)

        else:
            return nones

        if 'pron' in list(tree["noun"]):
            ss = self.S
            nones = [noun for noun in list(tree["noun"]) if noun != "pron"]
            if len(nones) > 1:
                candidates2.append("OF(%s,[%s])" % (ss, ",".join(nones)))
            else:
                candidates2.append("OF(%s,%s)" % (ss, ",".join(nones)))

        return candidates1 + candidates2

    def flat_of(self, tree):
        """Flatten OF(np1, np2); for "结果" (result) / "图象"/"图像" (graph)
        heads the fallback candidates come from np1, otherwise from np2."""
        np1 = self.flatten(tree["OF"][0])
        np2 = self.flatten(tree["OF"][1])
        if np2 == ["结果"] or np2 == ["图象"] or np2 == ["图像"]:
            return ["OF(%s,%s)" % item for item in itertools.product(np1, np2)] + np1
        else:
            return ["OF(%s,%s)" % item for item in itertools.product(np1, np2)] + np2

    def simple_flat(self, tree):
        """Collect all leaf values of a nested dict/list tree."""
        tokens = []
        if isinstance(tree, dict):
            for value in tree.values():
                tokens.extend(self.simple_flat(value))
            return tokens

        elif isinstance(tree, list):
            for item in tree:
                tokens += self.simple_flat(item)
            return tokens
        else:
            return [tree]

    def flat_and(self, tree):
        """Flatten AND(np1, np2) into both operand orders, plus plain
        concatenations when no latin identifiers are involved."""
        np1 = self.flatten(tree["AND"][0])
        np2 = self.flatten(tree["AND"][1]) if len(tree["AND"]) > 1 else [""]

        cands = ["AND(%s,%s)" % i for item in itertools.product(np1, np2) for i in itertools.permutations(item)]
        cands += ["%s%s" % item for item in itertools.product(np1, np2) if re.search("[a-z]", "%s%s" % item) is None]
        return cands

    def flatten(self, tree):
        """Dispatch on the node shape and flatten recursively.

        :param tree: str leaf, list of nouns, or a dict keyed by node type
        :return: list of candidate strings (None for unknown dict shapes)
        """
        if isinstance(tree, str):
            return [tree.replace("what", "")]

        elif isinstance(tree, list):
            return self.flat_noun({"noun": tree})
        elif "S" in tree:
            return self.flat_s(tree)
        elif "number" in tree:
            return self.flat_number(tree)
        elif "noun" in tree:
            return self.flat_noun(tree)
        elif "TP" in tree:
            return self.flat_tp(tree)
        elif "PP" in tree:
            # Remember the subject entity id hidden inside the PP (same
            # patterns as in flat_s).
            if re.search("(func[0-9]+)", str(tree["PP"])):
                self.S = re.search("(func[0-9]+)", str(tree["PP"])).group(0)
            elif re.search("(zhixian.txt[0-9]+)", str(tree["PP"])):
                self.S = re.search("(zhixian.txt[0-9]+)", str(tree["PP"])).group(0)
            elif re.search("(eq[0-9]+)", str(tree["PP"])):
                self.S = re.search("(eq[0-9]+)", str(tree["PP"])).group(0)
            elif re.search("(Sequence[0-9]+)", str(tree["PP"])):
                # Coreference resolution. Example input (Chinese): 等差数列{a_{n}}中,a_{1}+a_{2}+a_{3}=-24,a_{18}+a_{19}+a_{20}=78,则此数列前20项和等于()
                self.S = re.search("(Sequence[0-9]+)", str(tree["PP"])).group(0)
            return self.flatten(tree["PP"])  # ["PP(%s)" % token for token in flatten(tree["PP"])]
        elif "OF" in tree:
            return self.flat_of(tree)
        elif "ADJ" in tree:
            # Bracketed combinations of all ADJ leaves, longest first.
            tokens = self.simple_flat(tree["ADJ"])
            candidates = []
            if len(tokens) > 1:
                candidates.append("[%s]" % ",".join(tokens))
                for i in range(len(tokens), 0, -1):
                    for cand in itertools.combinations(tokens, i):
                        if len(cand) > 1:
                            candidates.append("[%s]" % ",".join(cand))
                        else:
                            candidates.append(cand[0])
            else:
                candidates.append(tokens[0])
            return candidates

        elif "NOT" in tree:
            return ["NOT(%s)" % text for text in self.flatten(tree["NOT"])]
        elif "AND" in tree:
            return self.flat_and(tree)

        elif "OR" in tree:
            np1 = self.flatten(tree["OR"][0])
            np2 = self.flatten(tree["OR"][1])
            return ["OR(%s,%s)" % item for item in itertools.product(np1, np2)] + np1 + np2

        elif "AP" in tree:
            adj = self.flatten(tree["AP"][0])
            np = self.flatten(tree["AP"][1])
            return ["ADJ(%s,%s)" % item for item in itertools.product(adj, np)] + np

        elif "VP" in tree:
            return self.flatten(tree["VP"])


class DeepFlatten(Flatten):
    """Flatten variant that, whenever a sub-expression already matches a
    rule, collapses it into a fresh obj<N> placeholder and records the
    intermediate query "<expr>=obj<N>" in temp_tokens."""

    def __init__(self, all_conditions, all_questions):
        # Pre-extracted rule pairs, as produced by Classifier.__init__.
        self.all_conditions = all_conditions
        self.all_questions = all_questions
        # Placeholder indices start above the fixed obj42 used elsewhere,
        # so fresh placeholders are obj43, obj44, ...
        self.obj_index = 42
        self.temp_tokens = []

    def flat_and(self, tree):
        """Like Flatten.flat_and, but collapse the first AND candidate that
        matches a rule into an obj<N> placeholder."""
        np1 = self.flatten(tree["AND"][0])
        np2 = self.flatten(tree["AND"][1]) if len(tree["AND"]) > 1 else [""]

        cands = ["AND(%s,%s)" % i for item in itertools.product(np1, np2) for i in itertools.permutations(item)]
        for cand in cands:
            cleaned_cand = clean_query(cand)
            if check(cleaned_cand, self.all_conditions) or check(cleaned_cand, self.all_questions):
                self.obj_index += 1
                obj = "obj%d" % self.obj_index
                query = "%s=%s" % (cand, obj)
                self.temp_tokens.append({"condition": query, "main": tree, "text": ""})
                return [cand, obj]

        cands += ["%s%s" % item for item in itertools.product(np1, np2) if re.search("[a-z]", "%s%s" % item) is None]
        return cands

    def flat_noun(self, tree):
        """Like Flatten.flat_noun, but collapse matched noun pairs (or the
        special expression/line/funceq nouns) into obj<N> placeholders."""
        nones = [noun for noun in list(tree["noun"]) if noun != "what"]
        if not nones:
            nones = [""]

        if len(nones) > 1:
            for item in itertools.combinations(nones, 2):
                cand = "[%s,%s]" % item
                cleaned_cand = clean_query(cand)
                if check(cleaned_cand, self.all_conditions) or check(cleaned_cand, self.all_questions):
                    self.obj_index += 1
                    obj = "obj%d" % self.obj_index
                    query = "%s=%s" % (cand, obj)
                    self.temp_tokens.append({"condition": query, "main": tree, "text": ""})
                    return [cand, obj]

                # A single expression-like noun in the pair also becomes an
                # intermediate object on its own.
                special = [c for c in item if clean_query(c) in ["Expression", "Expressions", "Line", "FuncEq"]]
                if special and len(special) == 1:
                    self.obj_index += 1
                    obj = "obj%d" % self.obj_index
                    query = "%s=%s" % (special[0], obj)
                    self.temp_tokens.append({"condition": query, "main": tree, "text": ""})
                    return [cand, obj]
        else:
            cand = clean_query(nones[0])
            if cand == "expression" or cand == "expressions" or "line" == cand or cand == "funceq":
                self.obj_index += 1
                obj = "obj%d" % self.obj_index
                query = "%s=%s" % (nones[0], obj)
                self.temp_tokens.append({"condition": query, "main": tree, "text": ""})
                return [obj]

        # No collapse possible: fall back to the same enumeration as
        # Flatten.flat_noun.
        candidates1 = []
        candidates2 = []
        nones = [noun for noun in list(tree["noun"]) if noun != "what"]
        if not nones:
            nones = [""]

        if len(nones) > 1:
            candidates1.append("[%s]" % ",".join(nones))
            for i in range(len(nones), 0, -1):
                for cand in itertools.combinations(nones, i):
                    if len(cand) > 1:
                        none = "[%s]" % ",".join(cand)
                    else:
                        none = cand[0]

                    if re.search("[a-z]", none):
                        candidates1.append(none)
                    else:
                        candidates2.append(none)

        else:
            return nones

        if 'pron' in list(tree["noun"]):
            ss = self.S
            nones = [noun for noun in list(tree["noun"]) if noun != "pron"]
            if len(nones) > 1:
                candidates2.append("OF(%s,[%s])" % (ss, ",".join(nones)))
            else:
                candidates2.append("OF(%s,%s)" % (ss, ",".join(nones)))
        return candidates1 + candidates2

    def flat_of(self, tree):
        """Like Flatten.flat_of, but collapse matched OF(...) candidates
        (optionally wrapped in the pending PP) into obj<N> placeholders;
        graph candidates ("图象"/"图像") are skipped."""
        if "OF" in tree:
            cands1 = self.flatten(tree["OF"][0])
            cands2 = self.flatten(tree["OF"][1])
            outputs = []
            for item in itertools.product(cands1, cands2):
                cand = "OF(%s,%s)" % item
                if "图象" in cand or "图像" in cand:
                    continue

                cleaned_cand = clean_query(cand)
                if check(cleaned_cand, self.all_conditions) or check(cleaned_cand, self.all_questions):
                    self.obj_index += 1
                    obj = "obj%d" % self.obj_index
                    query = "%s=%s" % (cand, obj)
                    self.temp_tokens.append({"condition": query, "main": tree, "text": ""})
                    return [cand, obj]
                elif self.PP:
                    pp_cands = ["PP(%s),%s" % (pp, m) for pp, m in itertools.product(self.PP, [cand])]
                    for pp_cand in pp_cands:
                        cleaned_cand = clean_query(pp_cand)
                        if check(cleaned_cand, self.all_conditions) or check(cleaned_cand, self.all_questions):
                            self.PP = None
                            # NOTE(review): obj_index is NOT incremented in
                            # this branch, so the placeholder reuses the
                            # previous index — confirm intended.
                            obj = "obj%d" % self.obj_index
                            query = "%s=%s" % (pp_cand, obj)
                            self.temp_tokens.append({"condition": query, "main": tree, "text": ""})
                            return [cand, obj]

                outputs.append(cand)

            if cands2 == ["结果"] or cands2 == ["图象"] or cands2 == ["图像"]:
                return outputs + cands1
            else:
                return outputs + cands2
        else:
            return self.flatten(tree)


# Module-level Flatten singleton shared by Classifier (it also carries the
# mutable class-level subject state Flatten.S across calls).
flatten = Flatten()

if __name__ == '__main__':
    # Smoke test: detach the AND subtree from a sample parse (the escaped
    # nouns decode to 单项式 "monomial", 系数 "coefficient", 次数 "degree").
    print(get_sub_token({'S': {
        'AND': [{'OF': [{'noun': ['\u5355\u9879\u5f0f', 'poly1']}, {'noun': ['\u7cfb\u6570']}]},
                 {'noun': ['\u6b21\u6570']}]}, 'O': {'noun': ['what']}, 'V': 'IS'}, "AND"))
    pass
