import requests, random, re, logging
from bs4 import BeautifulSoup
from lxml import etree

# Sogou WeChat search endpoint used to locate official-account articles.
_sougou_url = 'https://weixin.sogou.com/weixin'
# Base query-string parameters for the search request.
# 'query' is empty here and filled in by init_data() before each search.
_sougou_data = {
    'type': 1,          # 1 = search articles (as opposed to accounts)
    's_from': 'input',
    'query': '',        # search keyword, set via init_data()
    'ie': 'utf8',
    '_sug_': 'n',
    '_sug_type_': ''
}

def init_data(query):
    """Store the search keyword in the shared request-parameter dict."""
    _sougou_data.update(query=query)

def get_init_url(_sougou_headers: dict):
    """Search Sogou WeChat and extract the intermediate (anti-bot) article URL.

    Args:
        _sougou_headers: HTTP request headers for the search request.
            (NOTE(review): callers pass a dict; the previous ``str``
            annotation was incorrect.)

    Returns:
        A ``(cookie_dict, weixin_url)`` tuple on success, or ``None`` when
        the request fails, the result page contains no matching link, or an
        exception is raised.
    """
    try:
        sougou_page = requests.get(url=_sougou_url,
                                   headers=_sougou_headers,
                                   params=_sougou_data)
        if sougou_page.status_code != 200:
            logging.warning('文章初始url获取失败，状态码%s', sougou_page.status_code)
            return None
        logging.info('文章初始url获取成功')
        sougou_cookie = requests.utils.dict_from_cookiejar(sougou_page.cookies)
        soup = BeautifulSoup(sougou_page.text, 'lxml')
        # First search result's link; an empty selection means the page layout
        # changed or Sogou served a CAPTCHA page — report it instead of letting
        # an IndexError fall into the generic except below.
        links = soup.select('#sogou_vr_11002301_box_0 > dl:nth-child(3) > dd > a')
        if not links:
            logging.warning('文章初始url获取失败，页面中未找到文章链接')
            return None
        href = links[0]['href']
        # Sogou anti-crawler parameters: a random k in [1, 100] and, as h, the
        # single character of href located 25+k chars past "url=" (mirrors the
        # site's own JavaScript — presumably; verify if Sogou changes it).
        k = int(random.random() * 100) + 1
        anchor = href.find("url=")
        weixin_url = ('https://weixin.sogou.com' + href + "&k=" + str(k) +
                      "&h=" + href[anchor + 25 + k:anchor + 26 + k])
        return sougou_cookie, weixin_url
    except Exception as e:
        logging.warning('文章初始url获取错误，原因为%s，位置%s',
                        e, e.__traceback__.tb_lineno)
        return None


def get_real_url(weixin_url: str, sougou_cookie: dict, _sougou_headers: dict):
    """Follow the intermediate Sogou URL and reassemble the real article URL.

    The anti-bot landing page builds the final URL in JavaScript through
    repeated ``url += '...'`` statements; this scrapes those fragments from
    the page source and concatenates them.

    Args:
        weixin_url: Intermediate URL produced by get_init_url().
        sougou_cookie: Cookie dict captured from the search request.
            (NOTE(review): a dict, not a str as previously annotated.)
        _sougou_headers: HTTP request headers (dict).

    Returns:
        The real article URL string on success, otherwise ``None``.
    """
    try:
        weixin_page = requests.get(url=weixin_url,
                                   headers=_sougou_headers,
                                   cookies=sougou_cookie)
        if weixin_page.status_code != 200:
            logging.warning('文章真实url获取失败，状态码%s', weixin_page.status_code)
            return None
        logging.info('文章真实url获取成功')
        # Every fragment the page's JS appends via `url += '...'`.
        fragments = re.findall(r'(?<=url \+= \').*?(?=\')', weixin_page.text)
        # ''.join replaces the original quadratic += concatenation loop.
        return ''.join(fragments)
    except Exception as e:
        logging.warning('文章真实url获取错误，原因为%s，位置%s',
                        e, e.__traceback__.tb_lineno)
        return None


def get_article_data(weixin_real_url: str, _sougou_headers: dict):
    """Download the WeChat article and return its text content.

    Args:
        weixin_real_url: Real article URL produced by get_real_url().
        _sougou_headers: HTTP request headers (dict).

    Returns:
        The article body as one string with paragraphs separated by blank
        lines, or ``None`` on failure.
    """
    try:
        weixin_data = requests.get(url=weixin_real_url,
                                   headers=_sougou_headers)
        if weixin_data.status_code != 200:
            logging.warning('文章内容获取失败，状态码%s', weixin_data.status_code)
            return None
        weixin_page = etree.HTML(weixin_data.text)
        texts = weixin_page.xpath('//*[@id="js_content"]//text()')
        # Drop the first and last two text nodes (presumably boilerplate
        # around the body — TODO confirm) and join with blank lines.
        # Equivalent to the original append-"\n\n"-then-strip-last loop,
        # without building the intermediate list.
        weixin_article = '\n\n'.join(texts[1:-2])
        logging.info('文章内容获取成功')
        return weixin_article
    except Exception as e:
        logging.warning('文章获取错误，原因为%s，位置%s',
                        e, e.__traceback__.tb_lineno)
        return None