from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import requests
import time
import re
def getHtml(url):
    """Fetch *url* and return the decoded page text.

    On any request failure the literal sentinel string "读取失败" is
    returned instead of raising, so callers can keep crawling (parsing
    that sentinel with BeautifulSoup simply yields no result cards).
    """
    try:
        # Timeout prevents one stalled connection from hanging the crawl.
        r = requests.get(url, timeout=10)
        r.raise_for_status()
        # These pages often mislabel their charset; trust content sniffing.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; only network/HTTP errors are swallowed.
        return "读取失败"
def get_data(u):
    """Scrape one Qunar ticket-listing page at URL *u*.

    Returns a list of dicts, one per attraction card, with keys
    景点名称 (name), 星级 (star level), 城市 (city), 票价 (price),
    销量 (sales count), 热度 (heat score).  Fields missing from a card
    fall back to '0' / 0 / '' defaults, as before.
    """
    r = getHtml(u)
    soup = BeautifulSoup(r, "lxml")
    # Each attraction is rendered as one 'sight_item_detail' card.
    data = soup.find_all('div', class_='sight_item_detail clrfix')
    datafor = []
    for i in data:
        dicts = {}
        dicts['景点名称'] = i.find('h3', class_="sight_item_caption").text
        # bs4's find() returns None when an element is absent, so the
        # chained access raises AttributeError -- catch exactly that
        # instead of a bare ``except:`` that hides real bugs.
        try:
            dicts['星级'] = i.find('div', class_='sight_item_info').find('div', class_='clrfix').find('span', class_='level').text
        except AttributeError:
            dicts['星级'] = '0'
        try:
            # Area text looks like "省·市"; keep the part after the dot.
            dicts['城市'] = i.find('div', class_='sight_item_info').find('div', class_='clrfix').find('span', class_='area').find('a').text.split("·")[1]
        except (AttributeError, IndexError):
            # Previously unguarded: a card without an area link (or without
            # the "·" separator) crashed the whole page.
            dicts['城市'] = ''
        try:
            dicts['票价'] = i.find('span', class_='sight_item_price').find('em').text
        except AttributeError:
            dicts['票价'] = 0
        try:
            dicts['销量'] = i.find('td', class_='sight_item_sold-num').find('span').text
        except AttributeError:
            dicts['销量'] = 0
        try:
            # Title attribute looks like "热度:4.7"; keep the number part.
            dicts['热度'] = i.find('span', class_='product_star_level').find('em').get('title').split(':')[1]
        except (AttributeError, IndexError):
            dicts['热度'] = ''
        datafor.append(dicts)
    return datafor
def main():
    """Crawl 262 Jiangsu listing pages and write all rows to one CSV.

    Output goes to C:/Users/gcc/Desktop/jiangsu.csv (UTF-8, no index),
    same as before.  Pages that fail are reported and skipped so one bad
    page cannot abort the whole crawl.
    """
    data = []
    # keyword=%E6%B1%9F%E8%8B%8F is the URL-encoded "江苏" (Jiangsu).
    base = ("https://piao.qunar.com/ticket/list.htm?keyword=%E6%B1%9F%E8%8B%8F"
            "&region=&from=mpl_search_suggest&page=")
    for i in range(1, 263):
        try:
            data.extend(get_data(base + str(i)))
            # Be polite to the server between page requests.
            time.sleep(0.3)
        except Exception as e:
            # Best-effort crawl, but report the failure instead of the old
            # bare ``except: continue`` that silently dropped whole pages.
            print(f"page {i} failed: {e}")
            continue
    p = pd.DataFrame(data)
    p.to_csv(r"C:/Users/gcc/Desktop/jiangsu.csv", encoding="utf-8", index=False)


if __name__ == "__main__":
    # Guarded entry point: importing this module no longer triggers the crawl.
    main()