# encoding=utf8
import json
import sys
import time
from concurrent import futures
import re
# from selenium.webdriver import Chrome
# from selenium.webdriver import ChromeOptions
from random import random
# optional: random User-Agent generator (fake_useragent), currently disabled
# from fake_useragent import UserAgent
from lxml import etree
from urllib.parse import urlencode

import requests
from requests.cookies import RequestsCookieJar

# option = ChromeOptions()
# option.add_experimental_option('excludeSwitches', ['enable-automation'])
# driver = Chrome(options=option)

# cookies = dict(cookies_are='working')
# print(cookies)

# Return the proxy-pool entries from 代理池.txt as a list.
def get_vip_proxy():
    """Read the proxy-pool file ("代理池.txt") and return its lines, stripped.

    Returns:
        list[str]: one entry per line of the file (a blank line becomes '').
    """
    # Context manager fixes the leaked file handle of the original version.
    with open("代理池.txt", encoding='utf-8') as f:
        wordlist = [line.strip() for line in f]
    print(wordlist)
    return wordlist

# Obtain a baseline cookie set by issuing one warm-up search request.
def get_one_cookie():
    """Issue one search request to weixin.sogou.com and return its cookies.

    The cookies served by the site are augmented with a handful of fixed
    values before being returned.

    Returns:
        dict: cookie name -> value, ready to attach to later requests.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'Accept-Encoding': 'gzip, deflate, br',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'Host': 'weixin.sogou.com',
        'Connection': 'keep-alive'
    }
    url = "https://weixin.sogou.com/weixin?type=1&s_from=input&query=%E5%B0%8F%E9%B8%9F&ie=utf8&_sug_=n&_sug_type_="
    response = requests.get(url, headers=headers)
    cookies = response.cookies.get_dict()
    # Static extra values layered on top of whatever the server set.
    cookies.update({
        'browerV': '3',
        'osV': '1',
        'sct': '3',
        'sst0': '552',
        'SUV': '005E5558458CAE4B5B248FD6FBCA1033',
    })
    print(cookies)
    return cookies




# Fetch one proxy from the local proxy-pool HTTP service (127.0.0.1:5010).
# NOTE(review): an identical `get_proxy` is defined again further down this
# file; since `def` binds at execution time, the later definition wins.
def get_proxy():
    return requests.get("http://127.0.0.1:5010/get/").content.decode('utf8')

# Append one line to the run log (log.txt); best-effort, never raises.
def writelogtxt(msg):
    """Append *msg* plus a newline to log.txt.

    Logging is best-effort: I/O errors are reported but not propagated,
    so a failing log write cannot abort the crawl.
    """
    try:
        with open('log.txt', 'a+', encoding='utf-8') as f:
            f.write(msg + '\n')
    except OSError as e:
        # Narrowed from a bare `except:` so real bugs (e.g. TypeError on a
        # non-str msg) are no longer silently swallowed.
        print(e)

# Ask the local proxy-pool service for one proxy address.
def get_proxy():
    """Return a proxy address string from http://127.0.0.1:5010/get/."""
    response = requests.get("http://127.0.0.1:5010/get/")
    return response.content.decode('utf8')


# Append a successfully matched keyword to the results file.
def writetxt(msg):
    """Append *msg* plus a newline to 符合SEO的词语.txt.

    Best-effort: I/O errors are printed and swallowed so one failed write
    cannot abort the crawl.
    """
    try:
        with open('符合SEO的词语.txt', 'a+', encoding='utf-8') as f:
            f.write(msg + '\n')
    except OSError as e:
        # Narrowed from `except Exception`; dropped the redundant `pass`.
        print(e)
        print("写入1出错")

 # 简单的写入文档操作,遇到验证码关闭连接，然后这个词等于没有匹配，存到一个地方
def writeNopasstxt(msg):
    try:
        with open('遇到验证码需要重新跑的词语.txt', 'a+', encoding='utf-8') as f:
            f.write(msg + '\n')
    except:
        print("写入2出错")
        pass
# Read the crawl keywords from keywords.txt, one per line.
def readtxt():
    """Return the keyword list from keywords.txt (stripped lines).

    Returns:
        list[str]: one entry per line of the file.
    """
    # Context manager fixes the leaked file handle of the original version.
    with open("keywords.txt", encoding='utf-8') as f:
        wordlist = [line.strip() for line in f]
    print(wordlist)
    return wordlist

def etreeParse(content):
    """Extract the first result's account name from a Sogou result page.

    The extracted name is also appended to the results file via writetxt().

    Args:
        content: raw HTML text of the search-result page.

    Returns:
        str | None: the account name, or None when the expected node is
        absent or parsing fails.
    """
    try:
        ehtml = etree.HTML(content)
        searchlist = ehtml.xpath('//*[@id="sogou_vr_11002301_box_0"]/div/div[2]/p[1]/a/text()')
        if not searchlist:
            # XPath matched nothing: page has no first-result account name.
            # (The original relied on IndexError + a bare except for this.)
            return None
        writetxt(searchlist[0])
        return searchlist[0]
    except Exception as e:
        # Narrowed from a bare `except:`; parse failures are now reported
        # instead of vanishing silently.
        print(e)
        return None

# Match the target official-account (公众号) name in the result HTML.
def matchGZH(content, keyword):
    """Return True when *content* highlights *keyword* as an account name.

    Args:
        content: raw HTML text of the Sogou result page.
        keyword: account name to look for (str; None is tolerated and is
            matched literally as the text "None", as before).

    Returns:
        bool: True when the highlighted-name pattern is present.
    """
    # Bug fix: the keyword used to be interpolated unescaped into the regex,
    # so metacharacters (. * + ?) in a keyword corrupted the match.
    pattern = (r'<em><!--red_beg-->' + re.escape(str(keyword))
               + r'<!--red_end--></em></a>')
    match = re.search(pattern, content, re.IGNORECASE)
    print(match)
    return match is not None
# Match the target WeChat id (微信号) in the result HTML.
def matchwx(content, keyword):
    """Return True when *content* lists *keyword* as the WeChat id.

    As a side effect, tries to extract the account name shown for this
    WeChat id and records it via writetxt() when it also matches as an
    account name.

    Args:
        content: raw HTML text of the Sogou result page.
        keyword: WeChat id to look for.

    Returns:
        bool: True when the WeChat-id pattern is present.
    """
    # Best-effort account-name extraction: failures here must not prevent
    # the WeChat-id check below, so errors are only reported.
    try:
        wx_gzh = etreeParse(content)
        if matchGZH(content, wx_gzh):
            writetxt(wx_gzh)
    except Exception as e:
        print(e)
    # Bug fix: escape the keyword so regex metacharacters in it cannot
    # corrupt the pattern (it used to be interpolated raw).
    pattern = (r'<p class="info">微信号：<label name="em_weixinhao">'
               + re.escape(str(keyword)) + r'</label>')
    match = re.search(pattern, content, re.IGNORECASE)
    print(match)
    return match is not None
# Persist *keyword* when the page matches it as a WeChat id or account name.
def matchkeyword(html, keyword):
    """Record *keyword* to the results file when *html* matches it either
    as a WeChat id (matchwx) or as an account name (matchGZH)."""
    matched = matchwx(html, keyword) or matchGZH(html, keyword)
    if not matched:
        print(f"废弃词语{keyword}")
        return
    print(f"{keyword}存入txt文档ok")
    writetxt(keyword)

# Fetch the Sogou WeChat search page for *keyword* and run keyword matching.
def gethtml(cookies, keyword):
    """Search weixin.sogou.com for *keyword* and record any match.

    Args:
        cookies: cookie dict (e.g. from get_one_cookie()) sent with the
            request. Bug fix: this argument used to be accepted but never
            used, which defeated the cookie-refresh logic in the caller.
        keyword: search term (WeChat id or official-account name).

    Side effects: matches are written via matchkeyword(); when Sogou serves
    a captcha page the keyword is appended to the retry file instead.
    """
    print(f'Runtime--{keyword}开始')
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'Accept-Encoding': 'gzip, deflate, br',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'Host': 'weixin.sogou.com',
        'Connection': 'keep-alive'
    }
    session = requests.session()
    session.headers = headers
    html = session.get(
        url=f'https://weixin.sogou.com/weixin?type=1&query={keyword}&ie=utf8&s_from=input&_sug_=n&_sug_type_=',
        cookies=cookies)
    html.encoding = 'utf-8'
    # Sogou embeds this phrase when it rate-limits with a captcha page.
    if re.search('请输入图中的验证码', html.text) is not None:
        print(f'----X----X----X----X----Runtime--{keyword}验证码异常结束')
        writeNopasstxt(keyword)
        session.close()
        # Bug fix: previously execution fell through here and ran
        # matchkeyword() against the captcha page (and closed the session
        # a second time); abandon the keyword instead.
        return
    matchkeyword(html.text, keyword)
    print(f'Runtime--{keyword}结束')
    session.close()
    # threaded crawl entry point (unused by __main__)
def ThreadLine():
    """Crawl every keyword from keywords.txt using an 8-thread pool.

    Bug fix: gethtml takes (cookies, keyword) but was submitted with only
    the keyword, so every worker raised TypeError. A cookie set is now
    fetched once up front and shared by all submissions, mirroring the
    serial loop in __main__.
    """
    cookies = get_one_cookie()
    with futures.ThreadPoolExecutor(max_workers=8) as executor:
        for keyword in readtxt():
            executor.submit(gethtml, cookies, keyword)


# Plan: read the keyword list, request each keyword's search page serially,
# and when no captcha blocks the page let matchkeyword() record the hits.
# (A threaded variant exists in ThreadLine(); this entry point is serial.)
if __name__ == '__main__':
    # ThreadLine()
    # gethtml('淘券网')
    # print(get_one_cookie())
    fornum = 1
    for i in readtxt():
        print(f"第{fornum}次循环")
        time.sleep(2)  # throttle: pause 2s between requests
        if (fornum - 1) % 10 == 0:
            # Refresh the session cookies every 10 keywords; fornum is
            # reset to 1 here (then incremented below) so the refresh
            # recurs on a fixed 10-iteration cycle.
            cookies = get_one_cookie()
            print(f"更新Cookies-----{cookies}。")
            fornum =1
        gethtml(cookies,i)
        # leftover note from the threaded variant:
        # executor.submit(gethtml,i)
        fornum = fornum + 1
