import requests
# from lxml import etree
import parsel
import csv


# Scrape travel-note listings from travel.qunar.com and append one CSV row
# per note: title, short comment, view count, departure date, trip length,
# cost per person, companions, and the detail-page URL.

# Hoisted out of the page loop: the headers never change between requests.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36'
}

# Seconds to wait for a response before giving up; without a timeout a
# stalled server would hang the scraper forever.
REQUEST_TIMEOUT = 10

# `with` guarantees the file is flushed and closed even if a request raises
# mid-scrape. NOTE(review): mode='a' means the header row is duplicated on
# every rerun — confirm whether append semantics are intended.
with open('去哪儿.csv', mode='a', encoding='utf-8', newline='') as f:
    csv_writer = csv.writer(f)
    csv_writer.writerow([
        '地点', '短评', '浏览量', '出发日期', '天数', '人均费用', '人物', '详情页'
    ])

    # One Session pools TCP connections across all requests and lets every
    # request (listing AND detail pages) share the same User-Agent — the
    # original sent the detail requests with requests' default UA.
    session = requests.Session()
    session.headers.update(HEADERS)

    for page in range(1, 20):
        print(f'=====================正在爬去第{page}页================')
        url = f'https://travel.qunar.com/travelbook/list.htm?page={page}&order=hot_heat'

        resp = session.get(url, timeout=REQUEST_TIMEOUT)
        data_html = resp.text

        # Extract the detail-page links from the listing page.
        selector = parsel.Selector(data_html)
        url_list = selector.css('body > div.qn_mainbox > div > div.left_bar > ul > li > h2 > a::attr(href)').getall()

        # Listing hrefs look like /youji/7756410; the detail pages live at
        # https://travel.qunar.com/travelbook/note/7756410
        for data_url in url_list:
            note_id = data_url.replace('/youji/', '')
            detail_url = 'https://travel.qunar.com/travelbook/note/' + note_id

            # Fetch the detail page (now with the shared UA and a timeout).
            detail_resp = session.get(detail_url, timeout=REQUEST_TIMEOUT)
            detail_selector = parsel.Selector(detail_resp.text)

            # .get() returns None when a selector matches nothing, so missing
            # fields become empty cells rather than crashing the scrape.
            title = detail_selector.css('.b_crumb_cont *:nth-child(3)::text').get()                                                    # title
            comment = detail_selector.css('.title.white::text').get()                                                                 # short comment
            count = detail_selector.css('.view_count::text').get()                                                                    # view count
            date = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.when > p > span.data::text').get()             # departure date
            days = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.howlong > p > span.data::text').get()          # trip length
            money = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.howmuch > p > span.data::text').get()         # cost per person
            character = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.who > p > span.data::text').get()         # companions

            print(title, comment, count, date, days, money, character, detail_url)
            csv_writer.writerow([title, comment, count, date, days, money, character, detail_url])

        print(f'=====================第{page}页爬取成功================')

print('全部爬取成功！！！！！！')




