import logging
import re
import time
import elasticsearch7
import json
from sqlalchemy import text

from EsApi import EsApi
from config.secure import ES_URI, USERNAME, PASSWORD, PORT, MAIN_DB, ES_URI_SMALL, PORT_SMALL
from models.sentence import sentences as Sentence
from RedisClient import RedisClient
from models.base import db as samereportDB
import traceback

from models.sentence_word import SentenceWord


class SearchES(object):
    """Similarity-check worker for personal document libraries.

    Pops '&&'-separated sentence tasks from Redis, runs Elasticsearch
    multi-searches against a per-user document index, scores highlighted
    fragments against the original sentence and persists matches to the
    samereport database.
    """
    # NOTE: these class attributes are evaluated once at import time and are
    # shared by every SearchES instance (one Redis client, one ES client,
    # one DB session registry).
    server = ES_URI
    headers = {'content-type': 'application/json'}
    r = RedisClient(MAIN_DB)
    db = samereportDB
    es = elasticsearch7.Elasticsearch([ES_URI], http_auth=(USERNAME, PASSWORD), port=PORT)

    def __init__(self, order_number):
        # order_number identifies the check task and keys the Redis queue
        # ('personal_check_<order_number>') this worker drains.
        self.order_number = order_number
        # idList: not used by any method visible in this file — presumably a
        # leftover or used by external callers; TODO confirm before removing.
        self.idList = []

    def get_key_words(self, index, keyindex, i, clean_content):
        """Extract the highlighted keyword substrings for the window
        keyindex[index] .. keyindex[index + i] out of clean_content.

        Each keyindex entry carries 'start'/'end' offsets into clean_content.
        """
        return [
            clean_content[keyindex[index + offset]['start']:keyindex[index + offset]['end']]
            for offset in range(i + 1)
        ]

    def GetSentence(self):
        """Pop one pending sentence task for this order from Redis.

        Returns the UTF-8 decoded task string, or None when the queue is empty.
        """
        raw = self.r.client.lpop(f'personal_check_{self.order_number}')
        return str(raw, encoding='utf-8') if raw else None

    def wash_key_words(self, key_words):
        """Deduplicate keyword strings while preserving first-seen order.

        dict.fromkeys keeps insertion order (Python 3.7+) and replaces the
        previous O(n^2) `in`-list membership scan with an O(n) pass.
        """
        return list(dict.fromkeys(key_words))

    def removePunctuation(self, text):
        """Remove listed ASCII/CJK punctuation (and spaces) from *text*.

        NOTE: the parameter name shadows the module-level `sqlalchemy.text`
        import inside this method; kept unchanged for caller compatibility.
        Uses str.translate for a single C-level deletion pass instead of
        building a regex character class from an unescaped string.
        """
        punctuation = '!,;:?"\'、，；," "()（）：. “”。《》'
        return text.translate(str.maketrans('', '', punctuation)).strip()

    def similar_rate(self, key_words, original_content):
        """Fraction of original_content covered by the keyword characters.

        Every character of every keyword is removed (once) from a working copy
        of the punctuation-stripped sentence; the rate is the proportion of
        characters consumed, rounded to two decimals.

        Returns 0 when the sentence is empty after punctuation removal — the
        previous version raised ZeroDivisionError in that case.
        """
        original_content = self.removePunctuation(original_content)
        if not original_content:
            return 0
        remaining = original_content
        for word in key_words:
            for ch in word:
                # Consume one occurrence per keyword character so repeated
                # characters in the sentence are only credited once each.
                if remaining.find(ch) >= 0:
                    remaining = remaining.replace(ch, '', 1)
        return round(1 - (len(remaining) / len(original_content)), 2)

    def calculate_keywords_sum(self, keyindex, start, n):
        """Total highlighted-keyword length over keyindex[start : start + n],
        silently clamping at the end of the list.

        The first entry is indexed directly (so an invalid `start` still
        raises, as before); the accumulator no longer shadows builtin sum().
        """
        total = keyindex[start]['length']
        total += sum(
            keyindex[start + i]['length']
            for i in range(1, n)
            if start + i < len(keyindex)
        )
        return total

    def calculate_sentence_sum(self, keyindex, start, n):
        """Character span from the start of keyindex[start] to the end of
        keyindex[start + n - 1], clamping the last index to the list tail."""
        last = min(start + n - 1, len(keyindex) - 1)
        return keyindex[last]['end'] - keyindex[start]['start']

    def suspicious_part(self, origin_content, clean_content, keyindex):
        """Enumerate candidate similar regions of clean_content.

        A window of consecutive highlighted keywords qualifies when the
        keywords cover at least 60% of the original sentence length while the
        window itself spans at most 130% of it.

        Returns a list of {'sentence': str, 'key_words': list[str]} dicts.
        """
        base_length = len(origin_content)
        sentence_max_length = int(base_length * 1.3)
        # Previously recomputed len(origin_content); same value as base_length.
        keywords_min_length = int(base_length * 0.6)
        similar_sentence = []
        # Grow a window one keyword at a time from every possible start.
        for index in range(len(keyindex)):
            for i in range(len(keyindex) - index):
                keywords_length = self.calculate_keywords_sum(keyindex, index, i + 1)
                sentence_length = self.calculate_sentence_sum(keyindex, index, i + 1)
                if keywords_length >= keywords_min_length and sentence_length <= sentence_max_length:
                    start = keyindex[index]['start']
                    end = keyindex[index + i]['end']
                    similar_sentence.append({
                        'sentence': clean_content[start:end],
                        'key_words': self.get_key_words(index, keyindex, i, clean_content),
                    })
        return similar_sentence

    def msearch(self, es_conn, queries, index, doc_type, retries=0):
        """Run an Elasticsearch multi-search.

        :param es_conn: elasticsearch client to execute the request on
        :param queries: list of dict, es query bodies
        :param index: str, index to query against
        :param doc_type: str, doc type placed in every request header
        :param retries: int, current retry attempt (currently unused)
        :return: list with one hit-list per query; [] on any error
        """
        header = json.dumps({'index': index, 'type': doc_type})
        # NDJSON body: alternating header/body lines, one pair per query.
        body = ''.join('{}\n{}\n'.format(header, json.dumps(q)) for q in queries)
        try:
            resp = es_conn.msearch(body=body, index=index, request_timeout=30)
            hits_per_query = [r['hits']['hits'] for r in resp['responses']]
        except Exception as e:  # pragma: no cover
            logging.critical("msearch error {} on query {}".format(e, queries))
            return []
        return hits_per_query

    def check(self, origin_content, content):
        """Score how similar a highlighted ES fragment is to the original sentence.

        :param origin_content: the user's original sentence (plain text)
        :param content: highlighted fragment with <em>...</em> around matches
        :return: dict with 'rate', 'origin_content' and 'similar_content';
                 rate is 0 (and similar_content '') when the best candidate
                 region scores <= 0.5.
        """
        # Fragment with markup removed; all offsets below refer to this text.
        clean_content = content.replace('<em>', '').replace('</em>', '')
        # Closing-tag offsets: search for '</em>' in the text with '<em>'
        # already stripped, deleting each tag as it is found so every recorded
        # index is relative to the fully cleaned text.
        endlist = []
        pre_content = content.replace('<em>', '')
        while pre_content.find('</em>') >= 0:
            index = pre_content.find('</em>')
            endlist.append(index)
            pre_content = pre_content.replace('</em>', '', 1)

        # Opening-tag offsets, computed symmetrically on the text with
        # '</em>' stripped.
        startlist = []
        fore_content = content.replace('</em>', '')
        while fore_content.find('<em>') >= 0:
            index = fore_content.find('<em>')
            startlist.append(index)
            fore_content = fore_content.replace('<em>', '', 1)

        # Pair up start/end offsets into keyword descriptors. Assumes the
        # highlighter emits balanced tags (len(startlist) == len(endlist)) —
        # an unbalanced fragment would raise IndexError here.
        keyindex = []
        for index, item in enumerate(startlist):
            keyindex.append({
                'start': item,
                'end': endlist[index],
                'length': endlist[index] - item
            })
        # Best-scoring candidate window wins.
        sentences = self.suspicious_part(origin_content, clean_content, keyindex)
        max_rate = 0
        for item in sentences:
            rate = self.similar_rate(self.wash_key_words(item['key_words']), origin_content)
            if rate > max_rate:
                max_rate = rate
                similar_sentence = item['sentence']
        if max_rate > 0.5:
            # similar_sentence is always bound here: max_rate > 0.5 implies at
            # least one rate > 0 assignment happened in the loop above.
            return {'rate': max_rate, 'origin_content': origin_content,
                    'similar_content': clean_content.replace(similar_sentence, f'<em>{similar_sentence}</em>')}
        else:
            return {'rate': 0, 'origin_content': origin_content, 'similar_content': ''}

    def _build_query(self, sentence_info):
        """Build one ES bool query for a task string.

        Task layout: order_number&&sentence_index&&user_id&&content.
        """
        parts = sentence_info.split('&&')
        return {
            "query": {
                "bool": {
                    "must": [
                        {"match": {"content": {"query": parts[3].replace(' ', '')}}},
                        {"term": {"user_id": parts[2]}},
                        {"term": {"delete": 0}},
                    ]
                }
            },
            "highlight": {"fields": {"content": {"pre_tags": ["<em>"], "post_tags": ["</em>"]}}},
            "_source": ['filename'],
            "from": 0,
            "size": 30,
        }

    def _merge_highlight_pairs(self, highlight):
        """Join each adjacent pair of highlight fragments so matches spanning
        a fragment boundary can still be scored; one fragment passes through."""
        if len(highlight) > 1:
            return [highlight[i] + highlight[i + 1] for i in range(len(highlight) - 1)]
        return highlight

    @staticmethod
    def _strip_em_glue(fragment):
        """Drop spaces and collapse adjacent/comma-separated <em> runs."""
        return (fragment.replace(' ', '')
                .replace('</em><em>', '')
                .replace('</em>,<em>', '')
                .replace('</em>，<em>', '')
                .replace('</em>、<em>', ''))

    def apiSearch(self, index, sentences):
        """Batch-search *sentences* against *index* and persist matches.

        For each task string, every hit whose best fragment scores >= 0.7 is
        inserted into sentence_detail, and the owning sentence/sentence_word
        row is flagged is_similar=1 with its best rate. Per-task errors are
        appended to internet_error.log; batch-level errors are logged instead
        of being silently swallowed (the old bare `except: pass`).

        Fixes vs. previous version:
        - loop variable no longer shadows the `index` parameter;
        - unreachable `is_similar=2` branch removed (it sat inside an
          `if sentence_max_rate >= 0.7` guard, so `0.7 > sentence_max_rate`
          could never hold);
        - UPDATE now uses text() with bound parameters instead of
          interpolating Redis-sourced values into an f-string (SQL injection
          risk); the table name is still interpolated but comes from a fixed
          two-value whitelist.
        """
        queries = [self._build_query(s) for s in sentences]
        try:
            q_results = self.msearch(self.es, queries, index, '_doc')
            for q_idx, q_result in enumerate(q_results):
                parts = sentences[q_idx].split('&&')
                order_number, sentence_index = parts[0], parts[1]
                sentence = parts[3].replace(' ', '')
                # Orders starting with '8' live in the word-document table.
                table = 'sentence_word' if order_number[0] == '8' else 'sentence'
                try:
                    sentence_max_rate = 0
                    for ins in q_result:
                        highlight_list = self._merge_highlight_pairs(ins['highlight']['content'])
                        max_info = {'rate': 0}
                        for item in highlight_list:
                            info = self.check(sentence, self._strip_em_glue(item))
                            if info['rate'] > max_info['rate']:
                                max_info = info
                        if max_info['rate'] >= 0.7:
                            sentence_max_rate = max(sentence_max_rate, max_info['rate'])
                            # Merge the stored _source fields (filename) in.
                            max_info.update(ins['_source'])
                            sql_dict = {
                                'url': '',
                                'source': '自建库',
                                'author': '自建库',
                                'status': 1,
                                'order_number': order_number,
                                'sentence_index': sentence_index,
                                'original_content': sentence,
                                'similar_content': max_info['similar_content'],
                                'rate': max_info['rate'],
                                'title': max_info['filename'][0:78],
                                'create_time': int(time.time()),
                            }
                            insert_sql = ('INSERT INTO sentence_detail(`status`,`order_number`,`author`,'
                                          '`sentence_index`,`original_content`,`similar_content`,`rate`,'
                                          '`source`,`title`,`create_time`,`url`) '
                                          'VALUES(:status,:order_number,:author,:sentence_index,'
                                          ':original_content,:similar_content,:rate,:source,:title,'
                                          ':create_time,:url)')
                            with self.db.auto_commit():
                                self.db.session.execute(text(insert_sql), sql_dict)
                    if sentence_max_rate >= 0.7:
                        update_sql = text(
                            f"UPDATE {table} SET is_similar=1,"
                            f"rate=if({table}.rate>:rate,{table}.rate,:rate) "
                            f"WHERE order_number=:order_number AND sentence_index=:sentence_index"
                        )
                        with self.db.auto_commit():
                            self.db.session.execute(update_sql, {
                                'rate': sentence_max_rate,
                                'order_number': order_number,
                                'sentence_index': sentence_index,
                            })
                            model = Sentence if table == 'sentence' else SentenceWord
                            sentence_info = (self.db.session.query(model)
                                             .filter_by(order_number=order_number)
                                             .filter_by(sentence_index=sentence_index)
                                             .first())
                            # Queue the sentence for rewrite suggestions once.
                            if sentence_info.is_similar != 0 and sentence_info.suggest == '':
                                self.r.push_suggest_sentence(f'{order_number}#{sentence_index}#{sentence}')
                except Exception as e:
                    # Best-effort per task: record and keep processing others.
                    with open('internet_error.log', 'a+') as f:
                        f.write(f'{order_number}-----{str(e)}\n\n')
        except Exception:
            # Batch-level failure: keep best-effort semantics but leave a trace.
            logging.exception('apiSearch failed for order %s', self.order_number)

    def insertPolymerize(self, id, index):
        """Copy document *id* from *index* into this order's 'last_' index,
        but only when exactly one match exists."""
        lookup = {"query": {"match_phrase": {"_id": id}}}
        response = self.es.search(body=lookup, index=index)
        hits = response['hits']
        if hits['total']['value'] == 1:
            self.es.index(
                index=f'last_{self.order_number}',
                id=id,
                document=hits['hits'][0]['_source'],
                refresh=True,
            )

    def searchDebug(self, sentence):
        """Debug helper: query this order's index for *sentence* and print the
        best similarity info for every hit.

        Fix: a hit with exactly ONE highlight fragment previously produced an
        empty pair list and was silently skipped; now the single fragment is
        scored directly, matching apiSearch's behaviour.
        """
        query = {
            "query": {"match": {"content": {
                "query": sentence
            }}},
            "highlight": {"fields": {"content": {"pre_tags": ["<em>"], "post_tags": ["</em>"]}}},
            "_source": ['title', 'author', 'periodical', 'source', 'url', "university"],
            "from": 0,
            "size": 30
        }
        searchResponse = self.es.search(index=self.order_number, body=query, doc_type='_doc')
        for ins in searchResponse['hits']['hits']:
            highlight = ins['highlight']['content']
            # Pair adjacent fragments so cross-fragment matches score too.
            if len(highlight) > 1:
                highlight_list = [highlight[i] + highlight[i + 1]
                                  for i in range(len(highlight) - 1)]
            else:
                highlight_list = highlight
            max_rate = 0
            max_info = {'rate': 0}
            for item in highlight_list:
                similar_content = (item.replace(' ', '')
                                   .replace('</em><em>', '')
                                   .replace('</em>,<em>', '')
                                   .replace('</em>，<em>', '')
                                   .replace('</em>、<em>', ''))
                info = self.check(sentence, similar_content)
                if info['rate'] > max_rate:
                    max_rate = info['rate']
                    max_info = info
            print(max_info)

    def countUsefulKeys(self, keys):
        """Count keys at least two characters long (single characters are too
        noisy to be useful matches).

        Rewritten as a generator expression; the old accumulator shadowed the
        builtin sum().
        """
        return sum(1 for item in keys if len(item) >= 2)

    def batch_reqeust(self, batch_size):
        """Pop up to *batch_size* sentence tasks from the Redis queue.

        Returns (empty, sentences): empty is True when the queue ran dry
        before batch_size items were collected.
        NOTE: the method name keeps its historical 'reqeust' spelling because
        external callers use it.
        """
        collected = []
        for _ in range(batch_size):
            item = self.GetSentence()
            if not item:
                return True, collected
            collected.append(item)
        return False, collected


def run():
    """Worker loop: poll Redis for personal-check tasks and process each one
    in batches of 30 sentences until its queue is drained."""
    r = RedisClient(MAIN_DB)
    while True:
        task = r.GetPersonalCheckTask()
        print(task)
        if not task:
            print('sleep 5 ...')
            time.sleep(5)
            continue
        bot = SearchES(task)
        while True:
            empty, sentences = bot.batch_reqeust(30)
            try:
                bot.apiSearch('personal_documents', sentences)
            except Exception:
                traceback.print_exc()
            if empty:
                # Queue drained: hand the order over to the report stage.
                r.client.rpush('report_dev', task)
                break
            time.sleep(1)


# Entry point: start the blocking worker loop when executed as a script.
if __name__ == '__main__':
    run()
