import multiprocessing
import re
import time
import urllib.parse

import cchardet
import elasticsearch7
import grequests
import requests

from config.secure import ES_URI, USERNAME, PASSWORD, PORT, ES_URI_SMALL, PORT_SMALL
from log.log import log


class SnapContent(object):

    @classmethod
    def snapContent(cls, req_list, index):
        """Fetch every snapshot URL in *req_list* concurrently and index the
        page text into the Elasticsearch index *index*.

        Each item of req_list must provide 'snap_url' and 'title'.  The title
        is smuggled through a custom request header (URL-quoted so the value
        stays header-safe) and recovered from the matching response, because
        grequests.map gives no other way to pair a response with its input.
        Successful (HTTP 200) responses are parsed by `get_content` in worker
        processes, at most 8 at a time per batch.
        """
        request_list = []
        for item in req_list:
            headers = {
                # quoted: header values must survive non-ASCII titles
                'url_title': urllib.parse.quote(item['title'])
            }
            request_list.append(
                grequests.get(item['snap_url'], headers=headers, verify=False, timeout=30))
        res_list = grequests.map(request_list, gtimeout=10)
        # FIX: time.clock() was removed in Python 3.8; perf_counter() is the
        # documented replacement for elapsed-time measurement.
        t0 = time.perf_counter()
        process_count = 0
        jobs = []
        while len(res_list) > 0:
            # launch up to 8 parser processes for this batch
            while process_count < 8:
                if len(res_list) > 0:
                    res = res_list.pop()
                    if res and res.status_code == 200:
                        process_count += 1
                        # FIX: use the public case-insensitive header mapping
                        # instead of the private CaseInsensitiveDict._store.
                        title = urllib.parse.unquote(res.request.headers['url_title'])
                        p = multiprocessing.Process(target=get_content, args=(res, title, index))
                        jobs.append(p)
                        p.start()
                else:
                    break
            # wait for the whole batch before starting the next one
            for proc in jobs:
                proc.join()
            process_count = 0
            jobs = []
        print('耗时: ' + str(time.perf_counter() - t0))


def filter_tags(htmlstr):
    """Strip CDATA sections, <script>/<style> blocks, HTML tags, comments and
    character entities from *htmlstr*, returning plain text.

    FIX: every pattern below had lost its backslashes (``s`` instead of
    ``\\s``, ``w`` instead of ``\\w``, ``n`` instead of ``\\n``, unescaped
    ``[CDATA[`` brackets), so the regexes matched literal letters instead of
    whitespace / word characters and the filters did nothing useful.
    """
    # Filter CDATA first
    re_cdata = re.compile(r'//<!\[CDATA\[[^>]*//\]\]>', re.I)  # match CDATA
    re_script = re.compile(r'<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I)  # script
    re_style = re.compile(r'<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I)  # style
    re_br = re.compile(r'<br\s*?/?>')  # line breaks
    re_h = re.compile(r'</?\w+[^>]*>')  # HTML tags
    re_comment = re.compile(r'<!--[^>]*-->')  # HTML comments
    s = re_cdata.sub('', htmlstr)  # drop CDATA
    s = re_script.sub('', s)  # drop SCRIPT
    s = re_style.sub('', s)  # drop style
    s = re_br.sub('\n', s)  # turn <br> into newlines
    s = re_h.sub('', s)  # drop HTML tags
    s = re_comment.sub('', s)  # drop HTML comments
    # collapse runs of blank lines
    blank_line = re.compile(r'\n+')
    s = blank_line.sub('\n', s)
    s = replaceCharEntity(s)  # replace entities
    return s


def replaceCharEntity(htmlstr):
    """Replace HTML character entities (named and numeric forms of nbsp, lt,
    gt, amp, quot) in *htmlstr* with their literal characters; unknown
    entities are removed.

    FIX: the pattern was ``&#?(?P<name>w+);`` — the missing backslash in
    ``\\w+`` meant it only matched literal runs of the letter 'w', so no real
    entity was ever replaced.
    """
    CHAR_ENTITIES = {'nbsp': ' ', '160': ' ',
                     'lt': '<', '60': '<',
                     'gt': '>', '62': '>',
                     'amp': '&', '38': '&',
                     'quot': '"', '34': '"', }

    re_charEntity = re.compile(r'&#?(?P<name>\w+);')
    sz = re_charEntity.search(htmlstr)
    while sz:
        # sz.group() is the full entity, e.g. '&gt;';
        # the 'name' group is its body, e.g. 'gt'
        key = sz.group('name')
        try:
            # replace one occurrence at a time so newly-exposed entities
            # (e.g. '&amp;lt;' -> '&lt;' -> '<') are also resolved
            htmlstr = re_charEntity.sub(CHAR_ENTITIES[key], htmlstr, 1)
            sz = re_charEntity.search(htmlstr)
        except KeyError:
            # unknown entity: drop it
            htmlstr = re_charEntity.sub('', htmlstr, 1)
            sz = re_charEntity.search(htmlstr)
    return htmlstr


def repalce(s, re_exp, repl_string):
    """Substitute every match of compiled pattern *re_exp* in *s* with
    *repl_string* and return the result.

    NOTE(review): the name keeps its historical typo ("repalce") because
    external callers may depend on it.
    """
    substituted = re_exp.sub(repl_string, s)
    return substituted


def count_zh(paragraph):
    """Count the CJK Unified Ideographs (U+4E00–U+9FA5) in *paragraph*.

    Returns a ``(count, chars)`` tuple where *chars* is the concatenation of
    the matching characters in their original order.
    """
    zh_chars = [ch for ch in paragraph if '\u4e00' <= ch <= '\u9fa5']
    return len(zh_chars), ''.join(zh_chars)


def get_content(response, title, index):
    """Decode *response* to text, strip all markup/whitespace and bulk-insert
    the result into Elasticsearch index *index* under *title*.

    Runs inside a worker process (see SnapContent.snapContent); any failure
    is logged rather than raised so one bad page cannot kill the batch.
    """
    try:
        if response.apparent_encoding:
            # Re-decode via the detected charset: requests' `text` may have
            # been decoded with a wrong/declared encoding.
            # NOTE(review): assumes response.encoding is set whenever
            # apparent_encoding is truthy — a None encoding would raise here
            # and be swallowed by the except below; confirm with callers.
            content = response.text.encode(response.encoding).decode(response.apparent_encoding, errors='ignore')
        else:
            encoding = cchardet.detect(response.content)['encoding']
            content = response.content.decode(encoding, errors='ignore')
        # drop anything tag-shaped (re.S so tags spanning lines match too)
        tag_re = re.compile(r'<[^>]+>', re.S)
        untagged = tag_re.sub('', content)
        # FIX: raw string — '\s' in a plain literal is an invalid escape that
        # only works by CPython's lenient fallback.
        text = re.sub(r'\s', '', untagged)
        clean_content = filter_tags(text)
        info = {
            'title': title,
            'content': clean_content,
            'url': response.url,
            'source': '互联网'
        }
        insertIntoEs(info, index)
    except Exception as e:
        log('解析错误：' + str(e))

def check_status():
    """Return True iff the Elasticsearch cluster at the hard-coded host
    reports 'green' health; False on non-200 responses.

    FIX: the non-200 path previously fell through and returned an implicit
    None; it now returns False explicitly.  A timeout was added so a hung
    cluster cannot block the caller forever.
    """
    res = requests.get('http://111.173.119.77:9200/_cat/health', timeout=10)
    if res.status_code == 200:
        return 'green' in res.text
    return False

def insertIntoEs(info, index):
    """Bulk-insert *info*'s content into Elasticsearch index *index*, split
    into 1100-character slices on a 1000-character stride (the 100-character
    overlap preserves context across slice boundaries).

    Each slice becomes one document carrying the shared title/url/source, its
    slice number and an insert timestamp.
    """
    body = []
    timestamp = int(time.time())
    itemLength = len(info['content'])
    # FIX: ceiling division, minimum one slice.  The original
    # `int(n / 1000) + 1` emitted a spurious empty trailing slice whenever
    # the length was an exact multiple of 1000 (and indexed one empty doc
    # for empty content — that behavior is kept).
    sliceTotal = max(1, -(-itemLength // 1000))
    for i in range(sliceTotal):
        start = i * 1000
        end = min(start + 1100, itemLength)  # 100-char overlap with next slice
        sliceContent = info['content'][start:end]
        createBody = {"index": {"_index": index}}
        iteminfo = {
            "content": sliceContent,
            "title": info['title'],
            "url": info['url'],
            "source": info['source'],
            "slice": i,
            "time": timestamp
        }
        body.append(createBody)
        body.append(iteminfo)
    try:
        es = elasticsearch7.Elasticsearch([ES_URI_SMALL], http_auth=(USERNAME, PASSWORD), port=PORT_SMALL)
        es.bulk(body=body, index=index, refresh=True, request_timeout=200)
    except Exception as e:
        # FIX: route through the project logger for consistency with
        # get_content instead of printing to stdout.
        log('ES写入错误：' + str(e))


def check_url(url):
    """Return True when *url* has no exact-phrase match in the 'internet'
    index (i.e. it is safe to insert), False when it already exists."""
    query_body = {
        'query': {
            'match_phrase': {
                'url': url
            }
        }
    }
    client = elasticsearch7.Elasticsearch([ES_URI], http_auth=(USERNAME, PASSWORD), port=PORT)
    result = client.search(body=query_body, index='internet')
    # zero hits means the URL is new
    return result['hits']['total']['value'] == 0


if __name__ == '__main__':
    # Manual smoke test: print the ES cluster health status.
    # url = 'http://www.075564.net/fawu/fagui/6876.html'
    # check_url(url)
    res = check_status()
    print(res)
