# -*- coding: utf-8 -*-
"""
Created on Tue May  7 20:54:42 2019

@author: Hsieh
"""
import requests
from bs4 import BeautifulSoup
import csv
from multiprocessing import Queue
import  threading
import random
import time
import xlwt
import pymysql
import bs4
import urllib
import re
import pandas as pd
import json
import codecs
# Pool of desktop-browser User-Agent strings; one is picked at random per run
# so repeated scrapes look less like a single automated client.
User_Agent = ["Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36",
              "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
              "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
              "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
              "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"]

# HTTP headers sent with every request.
HEADERS = {
    # random.choice replaces the old random.randint(0, 4) index: it stays
    # correct if entries are ever added to or removed from User_Agent.
    'User-Agent': random.choice(User_Agent),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
    'Accept-Encoding': 'gzip, deflate, br',
    'Cookie': '',
    'Connection': 'keep-alive',
    'Pragma': 'no-cache',
    'Cache-Control': 'no-cache'
}

# Text file collecting URLs that failed to scrape; opened in append mode so
# failures from earlier runs are preserved.
path_list_error = 'city_list_error.txt'
# NOTE(review): this module-global handle is never explicitly closed and
# relies on interpreter exit to flush; a context manager would be safer.
list_error_file = open(path_list_error, 'a+', encoding='utf-8')


# Parallel accumulator lists, one entry per scraped city; they must always
# stay the same length since they become the columns of the output DataFrame.
cat_area = []       # area heading the city was listed under
cat_province = []   # province the city belongs to
cat_city_id = []    # numeric id extracted from the city's URL
cat_city_name = []  # display name of the city

# Landing page listing all destinations on Qunar travel.
url = 'http://travel.qunar.com/place/?from=header'

response = requests.get(url, headers=HEADERS, allow_redirects=False, timeout=5)

if response.status_code == 200:
    html = response.content.decode("utf-8")
    soup = BeautifulSoup(html, "html.parser")
    # The currently selected tab holds the destination listing.
    div_list = soup.find('div', class_='contbox current')
    for dl_listbox in div_list.find_all('dl'):
        # The <dt> carries the area heading for this group of destinations.
        area = dl_listbox.find('dt').get_text()
        # Iterate the CHILDREN of the first <dd>: a mix of Tags and
        # NavigableStrings (whitespace text nodes).
        for dd_ct in dl_listbox.find('dd'):
            if dd_ct.find('span') is None:
                # A Tag with no <span>: each <li> is a city whose display
                # name doubles as its province.
                for li_item in dd_ct.find_all('li'):
                    href = li_item.find('a').get('href')
                    # First run of digits in the link is the city id.
                    city_id = re.findall(r"\d+", href)[0]
                    province = li_item.find('a').get_text()
                    city_name = province
                    print(area, province, city_id, city_name, href)
                    # Bug fix: this branch previously printed each city but
                    # never recorded it, silently dropping these rows from
                    # the CSV. Mirror the appends of the sibling branch.
                    cat_city_id.append(city_id)
                    cat_city_name.append(city_name)
                    cat_province.append(province)
                    cat_area.append(area)
            elif dd_ct.find('span') != -1:
                # For NavigableString children, .find is str.find and returns
                # -1 when 'span' is absent — so this condition skips bare text
                # nodes while accepting Tags that do contain a <span>.
                prov = dd_ct.find('span').get_text()
                prov = ''.join(prov.split())  # strip ALL whitespace
                prov = prov[0:-1]             # drop the trailing character
                for li_item in dd_ct.find_all('li'):
                    href = li_item.find('a').get('href')
                    city_id = re.findall(r"\d+", href)[0]
                    city_name = li_item.find('a').get_text()
                    print(area, prov, city_id, city_name)
                    cat_city_id.append(city_id)
                    cat_city_name.append(city_name)
                    cat_province.append(prov)
                    cat_area.append(area)
else:
    # Bug fix: url[0] logged only the first character ('h'); write the full URL.
    list_error_file.write(url + '\n')


# Sanity check: the four parallel lists must have equal lengths.
print(len(cat_city_id))
print(len(cat_city_name))
print(len(cat_province))
print(len(cat_area))

city = pd.DataFrame({'city_id': cat_city_id, 'city_name': cat_city_name,
                     'province': cat_province, 'area': cat_area})
city.to_csv('citytry2.csv', encoding='utf_8_sig')
                         