import re
import torch
from model.encoders.StanfordCoreNLP_x import *
from transformers import BertTokenizer

# CoreNLP pipeline used for POS tagging below; `StanfordCoreNLP` comes from
# the star import of model.encoders.StanfordCoreNLP_x above.
nlp = StanfordCoreNLP('utils/stanford-corenlp-4.4.0/stanford-corenlp-4.4.0')
# Prefer GPU when available.  NOTE(review): `device` is not referenced in
# this chunk — presumably consumed elsewhere in the file; confirm.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# WordPiece tokenizer used to measure each phrase's length in BERT sub-tokens.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

def extract_phrase(data):
    """Extract noun phrases from a token list or a raw sentence string.

    Consecutive NN*-tagged tokens (optionally joined by the connectors
    'of', '.', '-', or "'") are merged into one phrase.  A '#' token
    never starts, continues, or ends a phrase.

    Args:
        data: a list of word tokens, or a sentence string.

    Returns:
        ``[phrase_list, start_list, len_list]`` where
        - phrase_list: the extracted noun phrases (str),
        - start_list:  the token index at which each phrase starts,
        - len_list:    the number of BERT word-pieces in each phrase;
        or ``False`` when ``data`` is neither a list nor a str.
    """
    if isinstance(data, list):
        # POS-tag each token individually so `character` stays aligned
        # 1:1 with the input tokens (start_list indices must match `data`).
        character = []
        for token in data:
            tags = nlp.pos_tag(token)
            # Tokens CoreNLP cannot tag get a placeholder entry so
            # alignment with the input list is preserved.
            character.append(tags[0] if tags else ('?', '?'))
    elif isinstance(data, str):
        # Bug fix: the original tagged the sentence into an unused
        # variable, so string input always produced empty output lists.
        character = nlp.pos_tag(data)
    else:
        return False

    phrase_list = []   # extracted noun phrases
    start_list = []    # start token index of each phrase
    len_list = []      # word-piece length of each phrase

    i = 0
    while i < len(character):
        # A phrase starts at a noun token that is not the '#' symbol.
        if "NN" in character[i][1] and character[i][0] != '#':
            str_temp = character[i][0]

            index = i + 1
            last = ""
            last_noun = ""
            # `last` lags one token behind `str_temp`, so the final token
            # of the run is only committed by the check after the loop.
            while index < len(character) and (
                    ("NN" in character[index][1] and character[index][0] != '#')
                    or character[index][0] in ('of', '.', '-', '\'')):
                str_temp = str_temp + last
                last = ' ' + character[index][0]
                last_token = character[index][0]
                last_noun = character[index][1]
                index = index + 1
            # Commit the trailing token only when it is a real noun, not
            # a dangling connector ('of', '.', '-', "'") or '#'.
            if ("NN" in last_noun and last_token != '#'
                    and last_token not in ('of', '.', '-', '\'')):
                str_temp = str_temp + last
            phrase_list.append(str_temp.strip(" "))
            # Use the public `tokenize` API (the original called the
            # private `_tokenize`; identical output for plain text).
            len_list.append(len(tokenizer.tokenize(str_temp)))
            start_list.append(i)
            # Resume scanning after the run consumed by this phrase.
            i = index
        i = i + 1

    return [phrase_list, start_list, len_list]
