# coding=utf8
import urllib.request
import urllib.parse
import string
import urllib
import re
import random
from io import BytesIO
import gzip
import re
import os


# Pool of user-agent strings, intended to rotate identities so Baidu does not
# rate-limit a single IP.
# NOTE(review): nothing in this file ever samples from this list -- presumably
# dead code; confirm before removing.
user_agents = ['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0', \
               'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0', \
               'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533+ \
               (KHTML, like Gecko) Element Browser 5.0', \
               'IBM WebExplorer /v0.94', 'Galaxy/1.0 [en] (Mac OS X 10.5.6; U; en)', \
               'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)', \
               'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14', \
               'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) \
               Version/6.0 Mobile/10A5355d Safari/8536.25', \
               'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) \
               Chrome/28.0.1468.0 Safari/537.36', \
               'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; TheWorld)']

# Default request headers used by baidu_search().
# NOTE(review): Accept-Encoding advertises "br" (brotli), but urllib cannot
# decode brotli and geturl() only handles gzip -- verify the server never
# answers with brotli, or drop "br" from the header.
headers = {
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.5,en;q=0.3",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36"
}


def baidu_search(keyword, pn):
    """Fetch one page of results and return the raw response body as bytes.

    Args:
        keyword: search keyword. Currently UNUSED -- the URL below is
            hard-coded to a Qunar hotel listing page; the commented-out
            lines show the original Baidu / Tianyancha targets.
        pn: result offset. Currently UNUSED for the same reason.

    Returns:
        bytes: the raw (typically gzip-compressed, per the Accept-Encoding
        header) response body.
    """
    #url = ("https://www.baidu.com/s?wd=" + urllib.parse.urlencode({'wd': keyword}) + "&pn={0}&cl=3&rn=100").format(pn);
    #url = ("https://www.tianyancha.com/search?key="+ urllib.parse.urlencode({'wd': keyword})+"&page=1").format(pn);
    url = "https://hotel.qunar.com/cn/beijing_city/dt-2010?fromDate=2021-02-19&toDate=2021-02-20&highQuality=true"
    req = urllib.request.Request(url, headers=headers)
    # Context manager closes the connection deterministically; the original
    # never closed the response object and leaked the socket.
    with urllib.request.urlopen(req) as res:
        return res.read()


def getList(regex, text):
    """Return a list of all non-overlapping matches of *regex* in *text*.

    Args:
        regex: regular expression pattern (string).
        text: string to scan.

    Returns:
        list: every match found, or an empty list when there is none.

    re.findall already returns a (possibly empty) list, so the original
    element-by-element copy into a second list was redundant.
    """
    return re.findall(regex, text)


def getMatch(regex, text):
    """Return the first match of *regex* in *text*, or "" when none exists.

    When the pattern contains multiple capture groups, the match is a tuple
    of group values (per re.findall semantics).
    """
    matches = re.findall(regex, text)
    return matches[0] if matches else ""


def clearTag(text):
    """Strip every HTML/XML tag (anything between '<' and '>') from *text*."""
    return re.sub(u'<[^>]+>', "", text)

def geturl(keyword):
    """Crawl result pages, extract (href, title) pairs, and visit each link.

    Args:
        keyword: search keyword forwarded to baidu_search (note that the
            target URL there is currently hard-coded, so the keyword has
            no effect on what is fetched).

    Side effects:
        Prints the decompressed page, the extracted fragments, and each
        successfully resolved URL to stdout.
    """
    for page in range(10):
        pn = page * 100 + 1
        htmls = baidu_search(keyword, pn)
        # Decompress the gzip-encoded response body.
        buff = BytesIO(htmls)
        f = gzip.GzipFile(fileobj=buff)
        # Decode the decompressed bytes to text.
        content = f.read().decode('utf-8')
        print(content)
        # Grab every <a class="text-option">...</a> fragment from the page.
        arrList = getList(u"<a class=\"text-option\">.*?<\/a>", content)
        print(arrList)
        for item in arrList:
            # Extract (href, anchor text) from an <h3 class="t"> result entry.
            regex = u"<h3.*?class=\"t\".*?><a.*?href=\"(.*?)\".*?>(.*?)<\/a>"
            link = getMatch(regex, item)
            if not link:
                # getMatch returns "" when there is no match; indexing it
                # below would raise an uncaught IndexError, so skip.
                continue
            url = link[0]
            # Anchor text with any nested tags stripped, encoded as UTF-8.
            title = clearTag(link[1]).encode('utf8')
            try:
                domain = urllib.request.Request(url)
                domain.add_header('Accept',
                                  'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3')
                domain.add_header('Accept-Encoding', 'gzip, deflate, br')
                domain.add_header('Accept-Language', 'zh-CN,zh;q=0.9')
                domain.add_header('Cache-Control', 'max-age=0')
                domain.add_header('Connection', 'keep-alive')
                domain.add_header('Cookie', 'uuid_tt_dd=10_35489889920-1563497330616-876822; .......')
                domain.add_header('Host', 'blog.csdn.net')
                domain.add_header('Referer', 'https://i.csdn.net/')
                domain.add_header('Sec-Fetch-Mode', 'navigate')
                domain.add_header('Sec-Fetch-Site', 'none')
                domain.add_header('Sec-Fetch-User', '?1')
                domain.add_header('Upgrade-Insecure-Requests', '1')
                domain.add_header('User-Agent',
                                  'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36')
                response = urllib.request.urlopen(domain)
                # Final URL after any redirects (kept for parity with the
                # original code; only the extracted url is printed).
                uri = response.geturl()
                #print(title)
                print(url)
            except Exception:
                # Narrowed from a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit. Per-URL fetches are
                # best-effort: log nothing and move on to the next link.
                continue

if __name__ == '__main__':
    # Script entry point: crawl with the keyword "Qunar" (去哪儿网).
    geturl('去哪儿网')