import gzip

import requests
from bs4 import BeautifulSoup
import csv
from fake_useragent import UserAgent
from time import sleep

# Request headers: a randomized User-Agent per run to reduce the chance of
# being blocked by the site. The remaining browser headers are kept
# commented out for reference / quick re-enabling.
HEADERS = {
'User-Agent': UserAgent().random,
# 'Accept': 'text/html,application/xhtml+xml, application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': ' zh-CN,zh; q=0.8 en-US; q=0.5 ,en; q=0.3',
# 'Accept-Encoding' : 'gzip, deflate, br',
# 'Cookie': '',
# 'Connection': 'keep-alive',
# 'Pragma': ' no-cache',
# 'Cache-Control': 'no-cache'
}
# Fallback fixed User-Agent, kept for reference:
# HEADERS = {
#         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
#     }

# Create the CSV file the scraped sight records are written into.
# Columns: region, name, sight id, type, level, popularity, address,
# feature, coordinates. `writer` is used as a module-level global by
# getType(); the file handle is only closed at interpreter exit.
csvfile  =  open('去哪儿景点.csv','w',encoding='utf-8',newline='')
writer = csv.writer(csvfile)
writer.writerow(["区域","名称","景点id","类型","级别","热度","地址","特色","经纬度"])

#下载景点内容函数
#下载景点内容函数
def download_page(url):
    """Download *url* and return the raw response body as bytes.

    Returns:
        bytes on success, or None when the request fails.

    The original version used a bare ``except: pass`` which silently
    swallowed every exception (including KeyboardInterrupt); we now catch
    only network/HTTP errors and fail explicitly.
    """
    try:
        # timeout keeps a stalled connection from hanging the scraper forever
        response = requests.get(url, headers=HEADERS, allow_redirects=True, timeout=10)
        return response.content
    except requests.RequestException:
        # Network-level failure: signal it to the caller explicitly.
        return None

#下载页面如果返回状态码不为200,2秒后在请求
def download_soup_waitting(url):
    try:
        response =requests.get(url, headers=HEADERS, allow_redirects=False, timeout=5)
        if response.status_code == 200:
            html = response.content
            html = html.decode("utf-8")
            soup = BeautifulSoup(html,"html.parser")
            return soup
        else:
            sleep(2)
            print("等待下载中")
            return download_soup_waitting(url)
    except:
        return ""
def getTypes():
    """Scrape every hot-sight category, starting each from page 1.

    The ``keyword`` query parameter is the percent-encoded form of
    "热门景点" ("hot sights"). getType() follows the next-page links
    itself, so only the first page URL is built here.
    """
    # Categories of popular sights: cultural relics, natural scenery,
    # parks, ancient architecture, temples, ruins, ancient towns,
    # mausoleums, former residences, religion.
    types = ["文化古迹","自然风光","公园","古建筑","寺庙","遗址","古镇","陵墓陵园","故居","宗教"]
    # Loop variable renamed from `type`, which shadowed the builtin.
    for sight_type in types:
        url = ("https://piao.qunar.com/ticket/list.htm"
               "?keyword=%E7%83%AD%E9%97%A8%E6%99%AF%E7%82%B9"
               "&region=&from=mps_search_suggest"
               "&subject=" + sight_type + "&page=1")
        # `i` is the module-level page counter defined below.
        getType(sight_type, url, i)

# Page counter passed into getType(); since ints are passed by value,
# each call increments its own copy before printing it as progress.
i = 1

def getType(type, url, i):
    """Scrape every result page of one sight category into the CSV.

    Args:
        type: category label (e.g. "公园"); written to the "类型" column.
              (Name kept for caller compatibility even though it shadows
              the builtin.)
        url:  URL of the first list page for this category.
        i:    running page counter; printed for progress on each page turn.

    Pages are walked iteratively (the original recursed once per page,
    which could exhaust the recursion limit on long categories), writing
    one row per sight via the module-level csv ``writer``.
    """
    def clean(text):
        # CSV cells must not contain raw newlines.
        return text.replace("\n", "")

    while url:
        i += 1
        # Download the current list page as a soup object.
        soup = download_soup_waitting(url)
        if not soup:
            # download_soup_waitting returns "" on failure; the original
            # would have crashed here with AttributeError.
            return
        # Container element that holds all sight entries on this page.
        search_list = soup.find('div', attrs={'id': 'search-list'})
        if search_list is None:
            # Unexpected page layout (or empty results): stop this category.
            return
        # One <div class="sight_item"> per sight; most fields live in
        # data-* attributes on that element.
        for sight_item in search_list.findAll('div', attrs={'class': 'sight_item'}):
            name = sight_item['data-sight-name']
            districts = sight_item['data-districts']
            point = sight_item['data-point']        # coordinate pair
            address = sight_item['data-address']
            data_id = sight_item['data-id']
            # Optional sub-elements: fall back to "" when absent.
            level = sight_item.find('span', attrs={'class': 'level'})
            level = level.text if level else ""
            product_star_level = sight_item.find('span', attrs={'class': 'product_star_level'})
            product_star_level = product_star_level.text if product_star_level else ""
            intro = sight_item.find('div', attrs={'class': 'intro'})
            intro = intro['title'] if intro else ""
            writer.writerow(
                [clean(districts), clean(name), clean(data_id), clean(type),
                 clean(level), clean(product_star_level), clean(address),
                 clean(intro), clean(point)])
        # Follow the next-page link if present; otherwise we are done.
        # (Renamed from `next`, which shadowed the builtin.)
        next_page = soup.find('a', attrs={'class': 'next'})
        if next_page:
            url = "https://piao.qunar.com" + next_page['href']
            print(i)
        else:
            url = None

if __name__ == '__main__':
    # Entry point: scrape every category when run as a script.
    getTypes()