# -*- coding: UTF-8 -*-
# Project : bio_tc
# File : sougou_weixin.py
# IDE : PyCharm
# Author : 博科（鑫贝西）田聪
# Date : 2021/12/2 10:19

# PS: 搜狗微信爬虫

import datetime
import random
import re
import time

import requests
from lxml import etree
from retrying import retry

from new_tools.toMysql import MySqlLink
from new_tools.sha import sha_1

class sougou_Demo:
    """Crawler for Sogou's WeChat article search (https://weixin.sogou.com).

    Workflow: obtain a session cookie containing an SNUID token from
    sogou.com, then search a fixed list of keywords, parse the result
    list pages with lxml, and fetch each article's detail page.
    """

    def __init__(self):
        # Timestamp of the last cookie refresh; request_get() re-fetches the
        # cookie once this is more than 5 minutes old.
        self.start_time = datetime.datetime.now()
        # Reserved for a 30-minute window — written but never read here.
        self.start_time_1 = datetime.datetime.now()
        # Pool of desktop User-Agent strings rotated to reduce blocking.
        self.ua = [
            'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.14 (KHTML, like Gecko) Chrome/24.0.1292.0 Safari/537.14',
            'Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.2 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
            'Mozilla/5.0 (X11; NetBSD) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36',
            'Mozilla/5.0 (X11; NetBSD) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36',
            'Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
            'Mozilla/5.0 (X11; CrOS i686 4319.74.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36',
        ]
        self.user_agent = random.choice(self.ua)
        self.cookie = self.getCookieContainSNUID()
        self.flag = 0  # NOTE(review): written but never read in this file

    @retry(stop_max_attempt_number=4, wait_incrementing_start=1000, wait_incrementing_increment=1500)
    def getCookieContainSNUID(self):
        """Fetch a fresh Sogou cookie string that includes an SNUID token.

        Returns:
            str: semicolon-joined cookie pairs (IPLOC/SUID/ABTEST/... plus a
            hard-coded PHPSESSID, a current-timestamp SUIR, and the SNUID
            taken from the Set-Cookie header of a dummy web search).

        Retries up to 4 times (incremental back-off) via @retry on any
        exception, e.g. a missing 'set-cookie' header on line below.
        """
        url = r'https://www.sogou.com/'
        headers = {
            'User-Agent': random.choice(self.ua),
        }
        # Random delay to look less like a bot.
        time.sleep(random.randint(5, 10))
        response = requests.get(url, headers=headers)
        # e.g. {'IPLOC': 'CN3701', 'SUID': '...', 'ABTEST': '0|...|v17'}
        cookie_dict = response.cookies.get_dict()
        # Join the cookie dict and append the fixed PHPSESSID and a
        # current-timestamp SUIR value expected by Sogou.
        cookies = ';'.join([f'{k}={v}' for k, v in
                            cookie_dict.items()]) + f';PHPSESSID=rfrcqafv5v74hbgpt98ah20vf3;SUIR={int(time.time())};'
        # A throwaway search request whose response carries SNUID in
        # Set-Cookie; HEAD is enough since only headers are needed.
        url = "https://www.sogou.com/web?query=333&_asf=www.sogou.com&_ast=1488955851&w=01019900&p=40040100&ie=utf8&from=index-nologin"
        headers['Cookie'] = cookies
        response = requests.head(url, headers=headers)
        SNUID = response.headers['set-cookie'].split(';')[0]
        return cookies + SNUID

    def request_get(self, url, flag=5):
        """GET *url* with the rotating UA/cookie session.

        Args:
            url: absolute URL to fetch.
            flag: remaining retry budget (decremented on entry).

        Returns:
            requests.Response with encoding guessed from the body, or
            ``None`` when every retry failed — callers must check for None.
        """
        flag -= 1

        now = datetime.datetime.now()
        # BUG FIX: timedelta.seconds ignores the .days component, so the
        # original `.seconds / 60 > 5` test breaks once the delta passes a
        # day; total_seconds() measures the full elapsed time.
        if (now - self.start_time).total_seconds() / 60 > 5:
            self.user_agent = random.choice(self.ua)
            self.start_time = now
            self.cookie = self.getCookieContainSNUID()

        headers = {
            'User-Agent': self.user_agent,
            'Cookie': self.cookie,
        }
        time.sleep(random.randint(5, 10))
        try:
            response = requests.get(
                url=url,
                headers=headers,
            )
            # apparent_encoding sniffs the charset from the body, avoiding
            # mojibake on pages that omit/lie about Content-Type.
            response.encoding = response.apparent_encoding
            return response
        except requests.exceptions.RequestException as e:
            # Broadened from SSLError: the message below says this branch is
            # for timeouts, so connection errors/timeouts should also retry
            # with a fresh User-Agent.
            print(e)
            print(f"超时..更换代理第{flag}次")
            self.user_agent = random.choice(self.ua)
            if flag >= 0:
                return self.request_get(url, flag)
            return None

    def start_request(self):
        """Search each keyword (page 1) and hand the HTML to htmljiexi.

        Keywords are hard-coded here; originally they were meant to come
        from Redis (see the commented-out hgetall call).
        """
        # keywords = Redis_DB().r.hgetall('subject')  # 获取关键词
        keyword_list = ['中国成功发射风云三号05星', '航天员出舱七个小时怎么喝', '礼兵队伍俯拍太震撼了']

        for keyword in set(keyword_list):
            url = f'https://weixin.sogou.com/weixin?type=2&query={keyword}&page=1'
            response = self.request_get(url)
            # request_get returns None when all retries failed; the original
            # called .text unconditionally and crashed with AttributeError.
            if response is None:
                continue
            self.htmljiexi(response.text)

    def htmljiexi(self, html: str):
        """Parse a search-result page and visit each article found.

        Extracts the detail-page URL, title and publish timestamp of every
        result item, builds an item dict keyed by sha_1(title, pubtime),
        and passes it to parse_content.
        """
        # Strip Sogou's keyword-highlight markers so titles join cleanly.
        html = html.replace('<!--red_beg-->', '').replace('<!--red_end-->', '')
        tree = etree.HTML(html)
        boxes = tree.xpath(r'//ul[@class="news-list"]/li/div[@class="txt-box"]')
        for box in boxes:
            item = {}
            try:
                url = r'https://weixin.sogou.com' + box.xpath(r'./h3/a/@href')[0]
                title = ''.join(box.xpath(r'./h3//text()')).strip()
                pubtime = box.xpath(r'./div[@class="s-p"]/@t')[0]
                item['title'] = title
                item['pubtime'] = pubtime
                item['id'] = sha_1(title, pubtime)
                self.parse_content(url, item)
            except IndexError as e:
                # A result card missing the link or timestamp node; skip it.
                print(e)

    def parse_content(self, url, item):
        """Fetch an article's redirect page and report the scraped item.

        Sogou's detail link returns a page that assembles the real WeChat
        URL in JavaScript via repeated `url += '...'` statements; the regex
        below collects those fragments. An empty result usually means an
        anti-bot / captcha page, which is dumped for inspection.
        """
        response = self.request_get(url)
        # Guard against exhausted retries (see request_get).
        if response is None:
            return
        text = response.text
        var_url = str(
            ''.join(re.findall(r"url \+=(.+?);", text.replace(r"\'", ''), re.DOTALL)).replace(' ', '').replace("@",
                                                                                                               "")).replace(
            r"\'", '').replace("'", '')
        if var_url == '':
            # No url fragments found — likely blocked; dump the page.
            print(text)
            return

        print(item)

if __name__ == '__main__':
    # Entry point: build the crawler (fetches its session cookie on
    # construction) and run the keyword search.
    spider = sougou_Demo()
    spider.start_request()
