import grequests
import logging
import re
import ssl
import time
import urllib.parse
import elasticsearch7
from lxml import etree
from lxml.etree import tostring
import requests
import json
from elasticsearch7 import exceptions as es_exceptions
from sqlalchemy import text
from config.secure import ES_URI, USERNAME, PASSWORD, PORT
from requests.auth import HTTPBasicAuth
from models.sentence import sentences as Sentence
from RedisClient import RedisClient, RedisClientAdsl
from headers import Headers
from models.base import db as samereportDB
import traceback

from models.sentence_word import SentenceWord


class SearchES(object):
    """Plagiarism-detection worker.

    Pops sentences for one order from Redis, multi-searches them against an
    Elasticsearch corpus, scores the highlighted hits and persists matches
    (sentence_detail rows + per-order ES index) to MySQL/ES.
    """
    server = ES_URI
    headers = {'content-type': 'application/json'}
    r = RedisClient()
    adslr = RedisClientAdsl()
    db = samereportDB
    es = elasticsearch7.Elasticsearch([ES_URI], http_auth=(USERNAME, PASSWORD), port=PORT)

    def __init__(self, order_number):
        self.order_number = order_number
        # ES doc ids already mirrored into the per-order index (dedupe guard)
        self.idList = []

    def GetSentence(self):
        """Pop the next pending sentence for this order from Redis.

        :return: str, or None when the queue is empty.
        """
        sentence_info = self.r.client.lpop(f'writer_local_{self.order_number}')
        if sentence_info:
            return sentence_info.decode('utf-8')
        return None

    def wash_key_words(self, key_words):
        """Deduplicate *key_words*, preserving first-seen order."""
        # dict preserves insertion order (3.7+); O(n) vs the old O(n^2) scan
        return list(dict.fromkeys(key_words))

    def removePunctuation(self, text):
        """Strip common ASCII/CJK punctuation (and spaces) from *text*."""
        punctuation = '!,;:?"\'、，；," "()（）：. “”。《》'
        # re.escape keeps every punctuation char literal inside the class
        text = re.sub('[{}]+'.format(re.escape(punctuation)), '', text)
        return text.strip()

    def similar_rate(self, key_words, original_content):
        """Fraction of *original_content* covered by the key words' characters.

        Every character of every key word deletes at most one occurrence from
        the punctuation-free content; the deleted fraction is the similarity.

        :return: float rounded to 2 decimals (0.0 for empty content).
        """
        original_content = self.removePunctuation(original_content)
        if not original_content:
            # nothing left after stripping punctuation -> avoid ZeroDivisionError
            return 0.0
        remaining = original_content
        for word in key_words:
            for ch in word:
                if ch in remaining:
                    remaining = remaining.replace(ch, '', 1)
        return round(1 - (len(remaining) / len(original_content)), 2)

    def calculate_keywords_sum(self, keyindex, start, n):
        """Total highlighted length of up to *n* key words starting at *start*."""
        total = keyindex[start]['length']  # renamed: 'sum' shadowed the builtin
        for offset in range(1, n):
            if start + offset < len(keyindex):
                total += keyindex[start + offset]['length']
        return total

    def calculate_sentence_sum(self, keyindex, start, n):
        """Length of the text span from key word *start* to key word start+n-1."""
        start_index = keyindex[start]['start']
        if start + n - 1 < len(keyindex):
            end_index = keyindex[start + n - 1]['end']
        else:
            # window runs past the last key word: clamp to the final one
            end_index = keyindex[-1]['end']
        return end_index - start_index

    def suspicious_part(self, origin_content, clean_content, keyindex):
        """Collect candidate similar passages from *clean_content*.

        A candidate is a run of consecutive highlighted key words whose
        combined keyword length reaches 60% of the original sentence while
        the spanned text stays within 130% of its length.

        :return: list of {'sentence': str, 'key_words': list[str]}
        """
        base_length = len(origin_content)
        sentence_max_length = int(base_length * 1.3)
        keywords_min_length = int(base_length * 0.6)
        similar_sentence = []
        for index in range(len(keyindex)):
            for i in range(0, len(keyindex) - index):
                keywords_length = self.calculate_keywords_sum(keyindex, index, i + 1)
                sentence_length = self.calculate_sentence_sum(keyindex, index, i + 1)
                if keywords_length >= keywords_min_length and sentence_length <= sentence_max_length:
                    start = keyindex[index]['start']
                    end = keyindex[index + i]['end']
                    similar_sentence.append({
                        'sentence': clean_content[start:end],
                        'key_words': self.get_key_words(index, keyindex, i, clean_content)
                    })
        return similar_sentence

    def msearch(self, es_conn, queries, index, doc_type, retries=0):
        """
        Es multi-search query
        :param es_conn: elasticsearch client
        :param queries: list of dict, es queries
        :param index: str, index to query against
        :param doc_type: str, defined doc type i.e. event
        :param retries: int, kept for interface compatibility (currently unused)
        :return: list, one hit-list per query ([] on any failure)
        """
        search_header = json.dumps({'index': index, 'type': doc_type})
        # ndjson body: alternating header/query lines, one pair per query
        request = ''.join('{}\n{}\n'.format(search_header, json.dumps(q)) for q in queries)
        try:
            resp = es_conn.msearch(body=request, index=index, request_timeout=30)
            return [r['hits']['hits'] for r in resp['responses']]
        except Exception as e:  # pragma: no cover
            logging.critical("msearch error {} on query {}".format(e, queries))
            return []

    def check(self, origin_content, content):
        """Score one ES highlight fragment against *origin_content*.

        *content* carries <em>...</em> markers around matched key words.

        :return: dict with 'rate', 'origin_content' and 'similar_content'
                 (best passage re-wrapped in a single <em> tag, '' if rate<=0.5)
        """
        clean_content = content.replace('<em>', '').replace('</em>', '')

        # Locate each highlight inside clean_content: stripping the *other*
        # tag first makes every find() offset relative to the cleaned text.
        endlist = []
        pre_content = content.replace('<em>', '')
        while '</em>' in pre_content:
            endlist.append(pre_content.find('</em>'))
            pre_content = pre_content.replace('</em>', '', 1)

        startlist = []
        fore_content = content.replace('</em>', '')
        while '<em>' in fore_content:
            startlist.append(fore_content.find('<em>'))
            fore_content = fore_content.replace('<em>', '', 1)

        keyindex = [{'start': s, 'end': e, 'length': e - s}
                    for s, e in zip(startlist, endlist)]

        max_rate = 0
        similar_sentence = ''  # guard: was unbound when no candidate beat 0.5
        for item in self.suspicious_part(origin_content, clean_content, keyindex):
            rate = self.similar_rate(self.wash_key_words(item['key_words']), origin_content)
            if rate > max_rate:
                max_rate = rate
                similar_sentence = item['sentence']
        if max_rate > 0.5:
            return {'rate': max_rate, 'origin_content': origin_content,
                    'similar_content': clean_content.replace(similar_sentence, f'<em>{similar_sentence}</em>')}
        return {'rate': 0, 'origin_content': origin_content, 'similar_content': ''}

    def insertPolymerize(self, id, index):
        """Mirror ES document *id* from *index* into this order's own index."""
        query = {
            "query": {
                "match_phrase": {
                    "_id": id
                }
            }
        }
        searchResponse = self.es.search(body=query, index=index)
        if searchResponse['hits']['total']['value'] == 1:
            self.es.index(index=self.order_number, id=id,
                          document=searchResponse['hits']['hits'][0]['_source'],
                          refresh=True)

    def apiSearch(self, index, sentences):
        """Run the similarity pipeline for a batch of sentences.

        :param index: str, ES index to search (e.g. 'periodical')
        :param sentences: list of 'order_number&&sentence_index&&content' strings
        """
        queries = []
        for sentence in sentences:
            queries.append(
                {
                    "query": {"match": {"content": {
                        "query": sentence.split('&&')[2].replace(' ', '')
                    }}},
                    "highlight": {"fields": {"content": {"pre_tags": ["<em>"], "post_tags": ["</em>"]}}},
                    "_source": ['title', 'author', 'periodical', 'source', 'url', "university"],
                    "from": 0,
                    "size": 15
                }
            )
        try:
            q_results = self.msearch(self.es, queries, index, '_doc')
            # loop variable renamed: it used to shadow the `index` parameter
            for q_index, q_result in enumerate(q_results):
                parts = sentences[q_index].split('&&')
                order_number = parts[0]
                sentence_index = parts[1]
                sentence = parts[2].replace(' ', '')
                # orders starting with '8' are Word-document jobs, stored separately
                table = 'sentence_word' if order_number[0] == '8' else 'sentence'
                try:
                    sentence_max_rate = 0
                    for ins in q_result:
                        highlight = ins['highlight']['content']
                        # merge adjacent fragments pairwise so a match that
                        # spans two highlight snippets is still detected
                        if len(highlight) > 1:
                            highlight_list = [highlight[i] + highlight[i + 1]
                                              for i in range(len(highlight) - 1)]
                        else:
                            highlight_list = highlight
                        max_rate = 0
                        max_info = {'rate': 0}
                        for item in highlight_list:
                            # collapse <em> runs separated only by punctuation
                            similar_content = (item.replace(' ', '')
                                               .replace('</em><em>', '')
                                               .replace('</em>,<em>', '')
                                               .replace('</em>，<em>', '')
                                               .replace('</em>、<em>', ''))
                            info = self.check(sentence, similar_content)
                            if info['rate'] > max_rate:
                                max_rate = info['rate']
                                max_info = info
                        if max_info['rate'] >= 0.6:
                            if max_info['rate'] > sentence_max_rate:
                                sentence_max_rate = max_info['rate']
                            max_info.update(ins['_source'])
                            sql_dict = {}
                            # most specific source label wins (checked in order)
                            if 'periodical' in max_info:
                                sql_dict['source'] = max_info['periodical']
                            if 'source' in max_info:
                                sql_dict['source'] = max_info['source']
                                if max_info['source'] == '互联网文档库':
                                    sql_dict['source'] = '大学生论文联合对比库'
                            if 'university' in max_info:
                                sql_dict['source'] = max_info['university']
                            if 'author' in max_info:
                                sql_dict['author'] = max_info['author'][0:48]
                            else:
                                sql_dict['author'] = '佚名'
                            sql_dict['status'] = 1
                            sql_dict['order_number'] = order_number
                            sql_dict['sentence_index'] = sentence_index
                            sql_dict['original_content'] = sentence
                            sql_dict['similar_content'] = max_info['similar_content']
                            sql_dict['rate'] = max_info['rate']
                            sql_dict['title'] = max_info['title'][0:78]
                            sql_dict['create_time'] = int(time.time())
                            insert_sql = '''INSERT INTO sentence_detail(`status`,`order_number`,`author`,`sentence_index`,`original_content`,`similar_content`,`rate`,`source`,`title`,`create_time`)
                                                VALUES(:status,:order_number,:author,:sentence_index,:original_content,:similar_content,:rate,:source,:title,:create_time)'''
                            if max_info['rate'] > 0.75:
                                # strong match: mirror source doc into per-order index
                                if ins['_id'] not in self.idList:
                                    self.idList.append(ins['_id'])
                                    self.insertPolymerize(ins['_id'], 'periodical')
                            with self.db.auto_commit():
                                self.db.session.execute(text(insert_sql), sql_dict)
                    if sentence_max_rate >= 0.6:
                        # is_similar: 1 = confirmed (>=0.7), 2 = suspected (0.6-0.7);
                        # parameterized now (was f-string interpolation, injection-prone)
                        if sentence_max_rate < 0.7:
                            update_sql = (f"UPDATE {table} SET is_similar=2,"
                                          f"rate=if({table}.rate>:rate,{table}.rate,:rate) "
                                          f"where order_number = :order_number "
                                          f"and sentence_index = :sentence_index and is_similar != '1'")
                        else:
                            update_sql = (f"UPDATE {table} SET is_similar=1,"
                                          f"rate=if({table}.rate>:rate,{table}.rate,:rate) "
                                          f"where order_number = :order_number "
                                          f"and sentence_index = :sentence_index")
                        with self.db.auto_commit():
                            self.db.session.execute(text(update_sql),
                                                    {'rate': sentence_max_rate,
                                                     'order_number': order_number,
                                                     'sentence_index': sentence_index})
                            model = Sentence if table == 'sentence' else SentenceWord
                            sentence_info = self.db.session.query(model).filter_by(
                                order_number=order_number).filter_by(
                                sentence_index=sentence_index).first()
                            if sentence_info.is_similar != 0 and sentence_info.suggest == '':
                                self.r.push_suggest_sentence(f'{order_number}#{sentence_index}#{sentence}')
                    else:
                        # no local match: hand the sentence to the polymerization stage
                        self.r.push_polymerize(self.order_number, f'{order_number}&&{sentence_index}&&{sentence}')
                except Exception as e:
                    with open('internet_error.log', 'a+') as f:
                        log = f'{order_number}-----{str(e)}\n\n'
                        f.writelines(log)
        except Exception:
            # was a silent bare `except: pass`; keep best-effort but leave a trace
            logging.exception('apiSearch failed for batch %s', sentences)

    def get_key_words(self, index, keyindex, i, clean_content):
        """Extract the i+1 highlighted key-word substrings starting at *index*."""
        return [clean_content[keyindex[index + k]['start']:keyindex[index + k]['end']]
                for k in range(i + 1)]

    def countUsefulKeys(self, keys):
        """Count keys that are at least two characters long."""
        return sum(1 for item in keys if len(item) >= 2)

    def batch_reqeust(self, batch_size):
        """Pop up to *batch_size* sentences from this order's Redis queue.

        :return: (empty, sentences) — ``empty`` is True when the queue ran
                 dry before ``batch_size`` items were collected.
        """
        sentences = []
        empty = False
        while len(sentences) < batch_size:
            # bugfix: called the module-global ``bot`` instead of ``self``,
            # which broke any use of this method outside the bottom script
            sentence_info = self.GetSentence()
            if sentence_info:
                sentences.append(sentence_info)
            else:
                empty = True
                break
        return empty, sentences


if __name__ == '__main__':
    # Worker entry point: pull writer tasks from Redis forever and process
    # each order's sentence queue in batches of 5.
    r = RedisClient()
    # bot = SearchES('12321423')
    # bot.apiSearch('periodical', ['81641328414217&&109&&在互联网时代，一部电影的成功，除了导演要有影响力、影片质量要高、演员表演要到位外，&&互联网**时代**一部**电影**成功**除了**导演**影响力**影片**质量**演员**表演**到位'])
    while True:
        task = r.GetWriterTask()
        print(task)
        if task:
            # NOTE: ``bot`` must stay module-global — SearchES.batch_reqeust
            # reads this exact name instead of ``self``; renaming it breaks it.
            bot = SearchES(task)
            while True:
                # batch_reqeust returns (queue_exhausted, sentences)
                empty, sentences = bot.batch_reqeust(5)
                if empty:
                    # Queue drained: status 0 -> mark in-progress (1);
                    # otherwise hand the order to the polymerization stage.
                    status = r.CheckTaskStatus(task)
                    print(status)
                    if status is not None and status == 0:
                        r.UpdateTaskStatus(task, 1)
                    else:
                        r.DeleteTaskStatus(task)
                        r.client.rpush('polymerization_task', task)
                    break
                try:
                    bot.apiSearch('periodical', sentences)
                except Exception as e:
                    # best-effort: log and keep draining the queue
                    traceback.print_exc()
        else:
            # no pending task; back off before polling Redis again
            print('sleep 5 ...')
            time.sleep(5)
