import re

from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer

from database import DB


# Module-level setup: DB handle, NLP tools, and the cached recommendation list.
# NOTE(review): credentials are hard-coded — consider moving them to env/config.
mongo = DB(user='admin', password='adminNLP')
lemmatizer = WordNetLemmatizer()
stemmer = SnowballStemmer('english')
# All recommendation documents materialized once; the matching loop below
# expects each doc to carry a 'lemmatized' key (and optionally 'not_extend').
# NOTE(review): "reccommend" is a typo for "recommend" — renaming would touch
# every use below, so it is only flagged here.
reccommend = list(mongo.recommendation.find({}))


# Sample input document: 'title' is the raw text, 'thighlight' collects
# {start: end} character spans (into the raw title) to be highlighted.
i = {'title': 'TURNER: The Uncertainty-based NER-test retrievtest akaretrievtest aaa testretriev Retrieval Framework for Chinese NER', 'thighlight': {}}
# i = {'title': '$\mathcal{Y}$-Tuning: An Efficient Tuning Paradigm for Large-Scale Pre-Trained Models via Label Representation Learning', 'thighlight': {}}


def remove_digit(sentence):
    """Return *sentence* with every run of decimal digits deleted."""
    return re.sub(r'\d+', '', sentence)

def remove_punctuation(sentence):
    """Delete ASCII punctuation from *sentence* and return the result.

    The hyphen '-' is deliberately NOT in the strip set: compound words are
    split/joined separately by the lemmatize/stem helpers in this module.
    """
    to_strip = r"""!"#$%&'()*+,./:;<=>?@[\]^_`{|}~"""
    # map each punctuation codepoint to None so translate() drops it in one pass
    return sentence.translate({ord(ch): None for ch in to_strip})

# English stopwords with punctuation stripped (so they compare equal to our
# cleaned tokens), plus '' so empty tokens produced by splitting are dropped.
stop_words = [remove_punctuation(s) for s in stopwords.words('english')+['']]

def wordlevel_lemmatize(sentence):
    """Lemmatize *sentence* word by word and return the cleaned string.

    Punctuation is stripped, hyphens are removed (fusing compound words into
    one token), the text is lower-cased, and stopwords/empty tokens are
    dropped before each surviving word is lemmatized.
    """
    sentence = remove_punctuation(sentence).replace('-', '')
    # idiom fix: `word not in stop_words` (was `not word in stop_words`)
    return ' '.join(lemmatizer.lemmatize(word)
                    for word in sentence.lower().split(' ')
                    if word not in stop_words)

def new_lemmatize_dict(sentence):
    """Lemmatize *sentence* and record where each lemma came from.

    Returns (lemmatized_string, l_dict) where every key is a ONE-element
    tuple (offset-of-lemma-in-result,) and the value is the character
    offset of the source word inside the punctuation-stripped sentence.

    NOTE(review): keys here are 1-tuples while lemmatize_dict() below uses
    (start, end) 2-tuples — the two mappings are not interchangeable, and
    this variant appears unused downstream. Confirm before relying on it.
    """
    sentence = sentence.lower()
    lemma_sentence = remove_punctuation(sentence)
    pointer = 0
    l_dict = {}
    res = ''
    for word in sentence.split(' '):
        if word not in stop_words:
            lemma_word = remove_punctuation(word)
            # locate the raw word inside the punctuation-free sentence,
            # searching only past everything matched so far
            pos = lemma_sentence.find(word, pointer)
            l_dict[(len(res), )] = pos
            pointer = pos+len(word)
            if pointer<len(sentence) and sentence[pointer] == ' ':
                pointer += 1  # step over the separating space
            res = f'{res}{lemmatizer.lemmatize(lemma_word.replace("-", ""))} '
    return res.strip(), l_dict


def lemmatize_dict(sentence):
    """Lemmatize *sentence* and map every lemma's span back to the original.

    Hyphenated compounds are split and each part lemmatized separately; the
    hyphen positions carve the original word into the matching pieces.

    Returns (lemmatized_string, l_dict) where l_dict maps
    (start, end) offsets in the lemmatized string ->
    (start, end) offsets in the (lower-cased) original sentence.
    """
    sentence = sentence.lower()
    pointer = 0
    l_dict = {}
    res = ''
    for word in sentence.split(' '):
        if word not in stop_words:
            # split compounds: "self-attention" -> ["self", "attention"]
            compound_word = remove_punctuation(word).replace("-", " ").split(' ')
            pos = pointer = sentence.find(word, pointer)
            for every_word in compound_word:
                lemma_word = lemmatizer.lemmatize(every_word)
                # is there a hyphen left inside the original word's span?
                hyphen_pos = sentence.find('-', pointer, pos+len(word))
                if hyphen_pos != -1:
                    # this piece of the compound ends at the hyphen
                    l_dict[(len(res), len(res) + len(lemma_word))] = (pointer, hyphen_pos)
                    pointer = hyphen_pos + 1
                else:
                    # last (or only) piece: ends where the raw word ends
                    l_dict[(len(res), len(res) + len(lemma_word))] = (pointer, pos+len(word))
                    pointer = pos + len(word)
                res = f'{res}{lemma_word} '
                # hop over any run of separators before the next piece/word
                while pointer < len(sentence) and sentence[pointer] in [' ', '-']:
                    pointer += 1
    return res.strip(), l_dict


def wordlevel_stem(sentence):
    """Stem *sentence* word by word and return the result.

    Lower-cases, strips punctuation, splits hyphenated compounds into
    separate tokens, then Snowball-stems every token (stopwords are kept).
    """
    cleaned = remove_punctuation(sentence.lower()).replace('-', ' ')
    stemmed = [stemmer.stem(token) for token in cleaned.split(' ')]
    return ' '.join(stemmed)


def smin(l: int, r: int, q: int, arr: list):
    """Binary search in arr[l..r] (inclusive indices), where *arr* is a list
    of tuples sorted DESCENDING by first element; return the smallest
    arr[i][0] that is >= q.

    If no element is >= q, the value at the final search position is
    returned unconditionally — callers should only use this when a match
    is known to exist.
    """
    # Fix: the call sites pass len(arr) as r, but this search treats r as an
    # inclusive index (it returns arr[l] and shrinks with r = mid - 1), so
    # mid could reach len(arr) and raise IndexError. Clamp defensively;
    # correct inclusive callers are unaffected.
    r = min(r, len(arr) - 1)
    while l < r:
        mid = (l + r + 1) >> 1
        if arr[mid][0] < q:
            r = mid - 1   # mid already below q: answer lies left of mid
        else:
            l = mid       # mid still >= q: push right toward the smallest match
    return arr[l][0]


def smax(l: int, r: int, q: int, arr: list):
    """Binary search in arr[l..r] (inclusive indices), where *arr* is a list
    of tuples sorted ASCENDING by first element; return the largest
    arr[i][0] that is <= q.

    If no element is <= q, the value at the final search position is
    returned unconditionally — callers should only use this when a match
    is known to exist.
    """
    # Fix: the call sites pass len(arr) as r, but this search treats r as an
    # inclusive index (it returns arr[l] and shrinks with r = mid - 1), so
    # mid could reach len(arr) and raise IndexError. Clamp defensively;
    # correct inclusive callers are unaffected.
    r = min(r, len(arr) - 1)
    while l < r:
        mid = (l + r + 1) >> 1
        if arr[mid][0] > q:
            r = mid - 1   # mid already above q: answer lies left of mid
        else:
            l = mid       # mid still <= q: push right toward the largest match
    return arr[l][0]

# --- Match every recommendation phrase against the lemmatized title and ------
# --- record original-title character spans to highlight in i['thighlight']. --
appearIncrease = [0]*len(reccommend)  # per-recommendation hit counter
title, t_dict = lemmatize_dict(i["title"].lower())
for k, v in t_dict.items():
    # debug output: each lemma next to the original slice it maps back to
    print(f'{title[k[0]:k[1]]}{" "*10}-*-*-*-{" "*10}{i["title"][v[0]:v[1]]}')
for j, rec in enumerate(reccommend):
    if rec['lemmatized'] in title:
        flag = True   # ensures appearIncrease counts each rec at most once
        pointer = 0
        # lemma-span lookup tables keyed by start / end offset within `title`
        t_start_map = {dict_key[0]: dict_key for dict_key in t_dict.keys()}
        t_end_map = {dict_key[1]: dict_key for dict_key in t_dict.keys()}
        while title.find(rec['lemmatized'], pointer) != -1:
            pos_x = title.find(rec['lemmatized'], pointer)
            pos_y = pos_x + len(rec['lemmatized'])
            if pos_x in t_start_map and pos_y in t_end_map:
                # clean hit on lemma boundaries: count once, map the span back
                if flag:
                    appearIncrease[j] += 1
                    flag = False
                start, end = t_dict[t_start_map[pos_x]][0], t_dict[t_end_map[pos_y]][1]
                i['thighlight'][start] = end
                pointer = pos_y
            elif 'not_extend' not in rec or rec['not_extend'] == False:
                # mid-word hit: widen to the nearest enclosing lemma spans
                # (largest mapped start <= pos_x, smallest mapped end >= pos_y)
                # NOTE(review): len(...) is passed where smin/smax treat r as an
                # inclusive index — verify bounds when the match sits at the
                # very last mapped position.
                pos_x_extend = smax(0, len(t_start_map), pos_x, sorted(t_start_map.items()))
                pos_y_extend = smin(0, len(t_end_map), pos_y, sorted(t_end_map.items(), reverse=True))
                start, end = t_dict[t_start_map[pos_x_extend]][0], t_dict[t_end_map[pos_y_extend]][1]
                i['thighlight'][start] = end
                pointer = pos_y_extend
            else:
                pointer = pos_x + 1  # rec opted out of extension: step past hit


# --- Render the highlight spans as a list of styled textRun segments. --------
# Walks the (start -> end) spans in order, emitting plain bold runs for the
# gaps and yellow-background bold runs for the highlighted slices.
# NOTE(review): 'location' {zoneId, startIndex: 74, endIndex: 75} is
# hard-coded in every run — presumably a document-API anchor; confirm against
# the consumer before reuse.
atitle = []
if 'thighlight' in i and len(i['thighlight']):
    start = 0
    for key, v in sorted(i['thighlight'].items()):
        # plain text between the previous run and this highlight's start
        if start < key:
            atitle.append({
                "type": "textRun",
                "textRun": {
                    "text": i['title'][start:key],
                    "style": {
                        "bold": True,
                        "location": {
                            "zoneId": "0",
                            "startIndex": 74,
                            "endIndex": 75
                        }
                    }
                }
            })
        # the highlighted slice itself; max(start, key) guards against spans
        # that overlap a previously emitted run
        if start < v and key < v:
            atitle.append({
                "type": "textRun",
                "textRun": {
                    "text": i['title'][max(start, key):v],
                    "style": {
                        "backColor": {
                            "red": 255,
                            "green": 246,
                            "blue": 122,
                            "alpha": 0.8
                        },
                        "bold": True,
                        "location": {
                            "zoneId": "0",
                            "startIndex": 74,
                            "endIndex": 75
                        }
                    }
                }
            })
        start = max(start, key, v)
    # trailing plain text after the last highlight
    if start < len(i['title']):
        atitle.append({
            "type": "textRun",
            "textRun": {
                "text": i['title'][start:],
                "style": {
                    "bold": True,
                    "location": {
                        "zoneId": "0",
                        "startIndex": 74,
                        "endIndex": 75
                    }
                }
            }
        })
else:
    # no highlights: emit the whole title as a single bold run
    # NOTE(review): here 'location' is a SIBLING of 'style', while the
    # branches above nest it INSIDE 'style' — confirm which shape the
    # consuming API actually expects.
    atitle.append({
        "type": "textRun",
        "textRun": {
            "text": i['title'],
            "style": {
                "bold": True
            },
            "location": {
                "zoneId": "0",
                "startIndex": 74,
                "endIndex": 75
            }
        }
    })

# Print the text of every generated run, in order.
print([segment['textRun']['text'] for segment in atitle if 'text' in segment['textRun']])
