# encoding=utf-8
import grequests
import logging
import time
import urllib.parse
import elasticsearch7
import urllib3
from lxml import etree
import requests
import json
from elasticsearch7 import exceptions as es_exceptions
import sys
from SnapContent import SnapContent
from config.secure import ES_URI, USERNAME, PASSWORD, PORT, MAIN_DB
from RedisClient import RedisClient
from headers import Headers
import ssl
from log.log import log


class SearchInternet(object):
    """Baidu search scraper for one order's keywords.

    Fetches Baidu result pages (optionally via a fallback rendering service),
    extracts result titles/URLs, filters out document-hosting domains, and
    hands the surviving links to ``SnapContent.snapContent``.

    Class-level attributes are shared across all instances:
      server / headers -- ES endpoint and default JSON request headers
      r / cookie_r     -- Redis clients (task queue + per-IP cookie/referer store)
      es               -- shared Elasticsearch 7 connection
    """
    server = ES_URI
    headers = {'content-type': 'application/json'}
    r = RedisClient(MAIN_DB)
    cookie_r = RedisClient(MAIN_DB)
    es = elasticsearch7.Elasticsearch([ES_URI], http_auth=(USERNAME, PASSWORD), port=PORT)
    # Globally disable HTTPS certificate verification for urllib-based calls.
    ssl._create_default_https_context = ssl._create_unverified_context

    def __init__(self, order_number, ip):
        """
        :param order_number: task/order identifier used as the Redis queue key
        :param ip: worker identity; keys the per-IP cookie/referer state in Redis
        """
        self.order_number = order_number
        self.ip = ip
        self.urlList = []  # URLs already collected for this order (de-dup)

    def msearch(self, es_conn, queries, index, doc_type, retries=0):
        """
        Es multi-search query.

        :param es_conn: Elasticsearch connection to run the msearch against
        :param queries: list of dict, es queries
        :param index: str, index to query against
        :param doc_type: str, defined doc type i.e. event
        :param retries: int, current retry attempt
        :return: list, found docs (one ``hits`` list per query)
        :raises: re-raises after 3 failed connection retries, or immediately
                 on any non-connection error.
        """
        search_header = json.dumps({'index': index, 'type': doc_type})
        request = ''
        for q in queries:
            # msearch wire format: alternating header line / body line pairs
            request += '{}\n{}\n'.format(search_header, json.dumps(q))
        try:
            resp = es_conn.msearch(body=request, index=index, request_timeout=30)
            found = [r['hits']['hits'] for r in resp['responses']]
        except (es_exceptions.ConnectionTimeout, es_exceptions.ConnectionError,
                es_exceptions.TransportError):  # pragma: no cover
            logging.warning("msearch connection failed, retrying...")  # Retry on timeout
            if retries > 2:  # pragma: no cover
                raise
            time.sleep(2)
            # Fix: retry with the caller-supplied connection and doc_type —
            # previously this hard-coded self.es and '_doc', silently switching
            # connection/doc-type on the retry path.
            found = self.msearch(es_conn, queries=queries, index=index,
                                 doc_type=doc_type, retries=retries + 1)
        except Exception as e:  # pragma: no cover
            logging.critical("msearch error {} on query {}".format(e, queries))
            raise
        return found

    def internetSearch(self, key_words):
        """Run one Baidu search for ``key_words`` and snapshot the results.

        ``key_words`` may carry a retry counter suffix: ``"<words>#<retries>"``.
        On request failure the keyword is pushed back to Redis with the counter
        incremented; after more than 1 retry the keyword is dropped.

        :param key_words: search phrase, optionally suffixed with '#<retries>'
        :return: result of SnapContent.snapContent, or None on failure/skip
        """
        try:
            retry_times = int(key_words.split('#')[1])
        except (IndexError, ValueError):
            # No '#<n>' suffix (or a non-numeric one): first attempt.
            retry_times = 0
        key_words = key_words.split('#')[0]
        log(f'重复次数:{str(retry_times)}')
        if retry_times > 1:
            log('超过错误次数')
            return None
        log(key_words)

        cookieString = self.createCookieString()
        referer = self.cookie_r.get_referer(self.ip)
        headers = Headers.headers(cookieString, referer)

        t0 = time.perf_counter()
        url = self.makeSearchUrl(key_words)
        # proxies = self.proxy()
        proxies = {}
        try:
            urllib3.disable_warnings()
            res = requests.get(url, headers=headers, timeout=5, verify=False, proxies=proxies)
        except Exception as e:
            # Requeue the keyword with an incremented retry counter.
            self.r.RecoverKeyWords(self.order_number, f'{key_words}#{str(retry_times + 1)}')
            log('请求错误' + str(e))
            return None
        log('请求耗时' + str(time.perf_counter() - t0) + str(res.status_code))
        if res.status_code == 200:
            tree = etree.HTML(res.text)
            log('返回内容长度：' + str(len(res.text)))
            if len(res.text) == 1:
                # A 1-char body means Baidu blocked us; fall back to the
                # internal rendering service to fetch the page.
                try:
                    data = {'url': url}
                    res = requests.post('http://baidu:8000/search', headers=self.headers, data=json.dumps(data),
                                        timeout=5, verify=False)
                    if res.status_code == 200:
                        res_data = json.loads(res.content)
                        html_text = res_data['content']
                        tree = etree.HTML(html_text)
                        log('模拟百度:' + str(len(html_text)))
                except Exception:
                    return None
            else:
                # Genuine page: remember it as the next referer and persist
                # any Set-Cookie values for this worker IP.
                self.cookie_r.set_url_param(self.ip, 'referer', url)
                self.updateCookie(res.cookies)
            try:
                content_xpath = '//div[contains(@class,"result c-container")]'
                contents = tree.xpath(content_xpath)
                self.parseInputParam(tree)
                log('contents:' + str(len(contents)))
            except Exception as e:
                # Best-effort: an unparseable page just yields no results,
                # but log it instead of silently discarding the error.
                logging.warning('internetSearch parse error: %s', e)
                return None

            req_list = []
            for result in contents:
                try:
                    title = result.xpath('./div[@class="c-container"]/div/h3')[0].xpath('string(.)').replace('...', '')
                    snap_url = str(result.xpath('@mu')[0])
                    if snap_url not in self.urlList:
                        self.urlList.append(snap_url)
                        info = {
                            'title': title,
                            'snap_url': snap_url
                        }
                        # Skip document-hosting sites we cannot snapshot.
                        unexpect_domain = ['.docin.com', '.doc88.com', 'wenku.baidu', 'doc.mbalib', 'max.book118']
                        if not any(key in snap_url for key in unexpect_domain):
                            req_list.append(info)
                except Exception:
                    # Malformed result node: skip it, keep the rest.
                    continue
            return SnapContent.snapContent(req_list, self.order_number)

    def parseInputParam(self, htmlTree):
        """Persist the hidden <input> fields of Baidu's search form to Redis.

        These values are echoed back as URL parameters on the next search
        (see makeSearchUrl) to look like a real browser session.

        :param htmlTree: lxml element tree of the result page
        """
        inputXpath = '//form[@id="form"]/input'
        contents = htmlTree.xpath(inputXpath)
        self.cookie_r.clear_url_param(self.ip)
        for item in contents:
            name = item.attrib['name']
            value = item.attrib['value']
            self.cookie_r.set_url_param(self.ip, name, value)

    def proxy(self):
        """Return a requests-style proxies dict backed by an ADSL address
        from Redis, initialising the ADSL pool if it is empty.

        :return: dict with 'http'/'https' proxy URLs
        """
        adsl = self.r.getADSL()

        if not adsl:
            self.r.init_adsl(1)
            adsl = self.r.getADSL()

        # SECURITY NOTE(review): proxy credentials are hard-coded here —
        # consider moving them into config.secure alongside ES credentials.
        proxies = {
            'http': 'http://qmmm00:1o1xgq3z@' + adsl,
            'https': 'http://qmmm00:1o1xgq3z@' + adsl,
        }
        return proxies

    def makeSearchUrl(self, key_words):
        """Build the Baidu search URL, replaying the stored form parameters.

        :param key_words: raw (unescaped) search phrase
        :return: str, full search URL
        """
        prefix = 'https://www.baidu.com/s?'
        params = self.cookie_r.get_url_param(self.ip)
        # Renamed from 'str': the original shadowed the builtin.
        query_string = ''
        for key in params.keys():
            query_string += f'{key}={params[key]}&'
        url = f'{prefix}{query_string}wd={urllib.parse.quote(key_words)}'
        return url

    def createCookieString(self):
        """Render this IP's stored cookie dict as a Cookie header value.

        :return: str, 'k1=v1;k2=v2;...' (trailing ';' kept as before)
        """
        cookieString = self.cookie_r.get_cookies(self.ip)
        cookieDict = json.loads(cookieString)
        cookies = ''
        for key in cookieDict.keys():
            cookies += f"{key}={cookieDict[key]};"
        return cookies

    def updateCookie(self, responseCookie):
        """Merge a response's Set-Cookie values into this IP's cookie store.

        :param responseCookie: requests CookieJar from the last response
        """
        set_cookies = requests.utils.dict_from_cookiejar(responseCookie)
        cookieString = self.cookie_r.get_cookies(self.ip)
        cookieDict = json.loads(cookieString)
        for key, value in set_cookies.items():
            cookieDict[key] = value
        self.cookie_r.set_cookies(self.ip, json.dumps(cookieDict))


def run(ip):
    """Worker loop: pull pre-internet tasks from Redis and drain their keywords.

    For each task, keywords are consumed one by one (the actual search call is
    currently disabled); once exhausted, the task is requeued on
    'internet_task'. Blocks forever, polling every 2 seconds when idle.

    :param ip: worker identity, forwarded to SearchInternet
    """
    redis_client = RedisClient(MAIN_DB)
    while True:
        order = redis_client.GetPreInternetTask()
        if not order:
            print('sleep 2 ...')
            time.sleep(2)
            continue
        bot = SearchInternet(order, ip)
        while True:
            key_words = redis_client.GetInternetTaskKeyWords(order)
            if not key_words:
                # Keywords exhausted: hand the task to the next stage.
                redis_client.client.rpush('internet_task', order)
                break
            # bot.internetSearch(key_words)
            time.sleep(2)

if __name__ == '__main__':
    from gevent import monkey

    # NOTE(review): monkey.patch_all() usually belongs before any network
    # imports so stdlib sockets are patched module-wide; here grequests (and
    # requests) were already imported at the top of the file — confirm this
    # ordering is intentional.
    monkey.patch_all()
    # argv[1]: worker IP identity keying the cookie/referer state in Redis.
    ip = sys.argv[1]
    run(ip)
