# Scraper for Qunar hot-attraction data
import requests
from bs4 import BeautifulSoup
import time
import xlwt
# Scraped fields are accumulated in module-level lists, one entry per
# attraction; read_Excel() later writes them out column by column.
# Star rating (scenic-area level, e.g. 5A/4A)
star=[]
# User score
score=[]
# Short description
intro=[]
# Address
address=[]
# Attraction name
sightName=[]
# District / region
districts=[]
# Monthly sales count
saleCount=[]
# Ticket price
qunarPrice=[]

# Build the listing-page URLs and delegate each one to get_content().
def get_url(pages=500):
    """Crawl Qunar hot-attraction listing pages 1 .. pages-1.

    pages: exclusive upper bound of the page range; defaults to 500,
           matching the original hard-coded value.

    A failure on a single page is reported (with the exception) and
    skipped, so one bad page does not abort the whole crawl.
    """
    for j in range(1, pages):
        time.sleep(1)  # throttle: crawling too fast triggers bot detection
        path = "https://piao.qunar.com/ticket/list.htm?keyword=%E7%83%AD%E9%97%A8%E6%99%AF%E7%82%B9&region=&from=mps_search_suggest&page=" + str(j)
        try:
            get_content(path)
            print("第 %s 页爬取完成!" % j)
        except Exception as e:
            # was a bare `except:` that silently swallowed everything,
            # including KeyboardInterrupt; now narrowed and reported
            print("错误信息页", j, e)

# Parse one listing page and append each extracted field to its list.
def get_content(path):
    """Download the page at *path* and extract attraction fields.

    Appends to the module-level lists: address, sightName, star, intro,
    districts, score, saleCount and qunarPrice. Relies on the global
    `header` dict (defined under __main__) for the User-Agent.
    Raises on network errors / unexpected markup; the caller (get_url)
    catches and reports per page.
    """
    # timeout prevents a dead connection from hanging the crawl forever
    request = requests.get(path, headers=header, timeout=10)
    request.encoding = "utf-8"
    html = request.text
    data = BeautifulSoup(html, 'html.parser')
    address_content = data.find_all("p", class_="address color999")
    sightName_content = data.find_all("a", class_="name")
    districts_content = data.find_all("span", class_="area")
    score_content = data.find_all("span", class_="product_star_level")
    saleCount_content = data.find_all("span", class_="hot_num")
    qunarPrice_content = data.find_all("span", class_="sight_item_price")
    for i in address_content:
        address.append(i.text)
    for i in sightName_content:
        sightName.append(i.text)
    # star/intro can be missing for some attractions, so iterate the
    # per-attraction containers and insert a placeholder ("无") when a
    # field is absent; otherwise later rows would shift out of alignment.
    sight_items = data.find_all('div', attrs={'class': 'sight_item'})
    for sight_item in sight_items:
        star_content = sight_item.find('span', class_='level')
        intro_content = sight_item.find("div", class_="intro color999")
        star.append(star_content.text if star_content else "无")
        intro.append(intro_content.text if intro_content else "无")
    for i in districts_content:
        # the district name is the text of the <a> nested in the span
        districts.append(i.a.text)
    for i in score_content:
        # score sits in <em><span>…</span></em> inside the container
        score.append(i.em.span.text)
    for i in saleCount_content:
        saleCount.append(i.text)
    for j in qunarPrice_content:
        qunarPrice.append("¥" + j.em.text + "起")
# Write the collected lists into an Excel spreadsheet (hot.xls).
def read_Excel():
    """Dump the scraped module-level lists to hot.xls.

    Row 0 holds the column headers; each attraction occupies one row
    below it, one column per field.
    """
    workbook = xlwt.Workbook(encoding="utf-8", style_compression=0)
    worksheet = workbook.add_sheet("热门景点", cell_overwrite_ok=True)
    # header titles (fixed the former "adress" misspelling) paired with
    # the list each column is filled from
    columns = ["districts", "sightName", "address", "intro", "star",
               "score", "saleCount", "qunarPrice"]
    data_lists = [districts, sightName, address, intro, star,
                  score, saleCount, qunarPrice]
    for col_idx, title in enumerate(columns):
        worksheet.write(0, col_idx, title)
    # data rows are offset by 1 to leave room for the header row
    for col_idx, values in enumerate(data_lists):
        for row_idx, value in enumerate(values):
            worksheet.write(row_idx + 1, col_idx, value)
    workbook.save("hot.xls")
if __name__ == '__main__':
    # Browser-like User-Agent so the site does not reject the requests.
    # NOTE: `header` is a module-level global that get_content() reads;
    # it only exists when this file is run as a script.
    header = {
        'user-agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36 SE 2.X MetaSr 1.0"
    }
    get_url()       # crawl all listing pages into the module lists
    read_Excel()    # write the collected data to hot.xls
    print("爬取成功")