from urllib import request
import requests
from bs4 import BeautifulSoup as bs
from urllib import parse
from requests import get  # 导入 requests.get()
import os
from time import sleep
from urllib.parse import unquote


# [Download module]
# Turn a base URL plus a page number into a urllib request object.
def request_from(url, page):
    """Build a ``urllib.request.Request`` for ``url`` + ``page`` + "/".

    A desktop-browser User-Agent is attached so the server treats the
    request as a normal page view.
    """
    ua = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
          'AppleWebKit/537.36 (KHTML, like Gecko) '
          'Chrome/73.0.3683.86 Safari/537.36')
    page_url = "{}{}/".format(url, page)
    return request.Request(url=page_url, headers={'User-Agent': ua})


# Get the total number of pages from the pagination bar.
def getCountPage(url):
    """Return the last page number (as a string) of ``url``'s listing.

    The pagination bar is the <li> items inside div#dict_page; the
    second-to-last <li> holds the link to the final page, whose anchor
    text is the page count.

    Returns the count as a string, "0" if the anchor is missing, or
    None (after printing a notice) when the page has no pagination at
    all or the request fails.
    """
    try:
        response = requests.get(url)
        html_tree = bs(response.text, "html.parser")
        # Locate the pagination container and its <li> entries.
        host_infos = html_tree.find("div", {"id": "dict_page"})
        host_list = host_infos.find_all("li")
        # Second-to-last <li> (the last one is typically a "next" link).
        anchor = host_list[-2].find('a')

        # get_text() always returns a string, so the only failure mode
        # worth checking is a missing <a> element.
        if anchor is None:
            return str(0)
        return anchor.get_text()

    except (AttributeError, IndexError, requests.RequestException):
        # div#dict_page absent, too few <li> items, or the HTTP request
        # itself failed — treat all of these as "no pagination".
        print("没有分页")


# Collect the navigation links of a listing page.
def getCountPageLink(url):
    """Return the href of every <li> link inside div#dict_nav_list on ``url``."""
    soup = bs(requests.get(url).text, "html.parser")
    # The navigation bar is a <ul>/<li> structure inside this div.
    nav_div = soup.find("div", {"id": "dict_nav_list"})
    return [item.find('a')['href'] for item in nav_div.find_all("li")]


# Wrap page fetching in a generator.
def request_data(url, start, end, url1):
    """Yield the decoded HTML of pages ``start`` through ``end`` (inclusive).

    The first request is built from ``url``; every subsequent request is
    built from ``url1`` (the two bases differ only for the first page).
    A short pause is inserted between requests.

    Yields: each page's response body decoded as UTF-8.
    """
    for page in range(start, end + 1):
        # Build the request object for this page.
        req = request_from(url=url, page=page)
        # Use a context manager so the connection is closed after each
        # page instead of being leaked.
        with request.urlopen(req) as res:
            print("当期正在请求第%d页..." % page)
            # Brief pause (10 ms) between requests so the backend is less
            # likely to flag this client as a crawler.
            sleep(0.01)
            # All pages after the first use the alternate base URL.
            url = url1
            # yield turns this function into a generator; each iteration
            # produces one page's HTML.
            yield res.read().decode("utf-8")
        # yield可以将一个函数变成一个生成器，生成器中内容就是每一次yield后面的内容
    # return l


# [Parsing module]
def analysis_data(data):
    """Extract download-link hrefs from an iterable of HTML strings.

    Each page is expected to contain div#dict_detail_list with one
    div.dict_dl_btn per item; that item's <a href> is collected.
    """
    links = []
    for page_html in data:
        tree = bs(page_html, "html.parser")
        detail = tree.find("div", {"id": "dict_detail_list"})
        for btn in detail.find_all("div", {"class": "dict_dl_btn"}):
            links.append(btn.find('a')['href'])
    return links


# Request the hotel detail page and extract the hotel name.
def getChildLink1(url):
    """Fetch ``url`` and print the hotel name found at
    div#root > section.G_detailBG > div.name_box.clearfix > p.name.

    Returns the name text. Raises AttributeError if the page layout does
    not match (e.g. the request was blocked or the markup changed).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36'}
    # NOTE(review): a large session-cookie dict was defined here but never
    # passed to requests.get(), so it was dead code and has been removed.
    # If the site starts rejecting anonymous requests, cookies must be
    # re-added via requests.get(url, headers=headers, cookies=...).
    response = requests.get(url, headers=headers)
    html_tree = bs(response.text, "html.parser")
    # Walk down the fixed container hierarchy to the name element.
    hotel = html_tree.find("div", {"id": "root"})
    hotel_section = hotel.find("section", {"class": "G_detailBG"})
    hotel_clearfix = hotel_section.find("div", {"class": "name_box clearfix"})
    name = hotel_clearfix.find("p", {"class": "name"})
    text = name.get_text()
    print(text)
    return text


# Request the hotel detail page and extract the phone number.
def getChildLink2(url):
    """Fetch ``url`` and print the phone number from the hotel
    description block (div.hotel_desc inside the first matching aside).

    Returns the phone text. Raises AttributeError/IndexError if the
    page layout does not match.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36'
    }
    # NOTE(review): a large session-cookie dict was defined here but never
    # passed to requests.get(); removed as dead code (see getChildLink1).
    response = requests.get(url, headers=headers)
    html_tree = bs(response.text, "html.parser")
    hotel = html_tree.find("div", {"id": "root"})
    hotel_section = hotel.find("section", {"class": "G_section_1200 clearfix"})
    asides = hotel_section.find_all("aside", {"class": "G_aside_880 G_blockBG patch_MB20 poi_tab"})
    hotel_desc = asides[0].find("div", {"class": "hotel_desc"})
    desc_items = hotel_desc.find_all("dl", {"class": "desc_item clearfix"})
    # Pages with at most 19 description items keep the phone in the first
    # item, otherwise in the second — presumably longer layouts insert an
    # extra leading item. TODO(review): confirm against live pages.
    index = 0 if len(desc_items) <= 19 else 1
    phone = desc_items[index].find("dd", {"class": "cont"})
    text = phone.get_text()
    print(text)
    return text


if __name__ == '__main__':
    # First-level page link (hotel detail endpoint).
    target_url = "https://hotel.tuniu.com/hotel-api/hotel/detail?c=%7B%22ct%22:20000%7D&d=%7B%22hotelId%22:%22372536654%22%7D"
    # Request the first-level page and extract the second-level details:
    # hotel name first, then the phone number.
    getChildLink1(url=target_url)
    getChildLink2(url=target_url)
