# -*- coding: utf-8 -*-
"""
Created on Tue May  7 22:43:44 2019

@author: Hsieh
"""
import requests
from bs4 import BeautifulSoup
import csv
from multiprocessing import Queue
import  threading
import random
import time
import xlwt
import pymysql
import bs4
import urllib
import re
import pandas as pd
import json
import codecs



        


# Error log: URLs that failed during crawling are appended here so the crawl
# can be retried later.  The handle is deliberately kept open for the whole
# run — parse_city/city_page write to it as a module-level global.
# Explicit encoding avoids platform-dependent defaults when logging URLs.
path_list_error = 'hotel_list_error.txt'
list_error_file = open(path_list_error, 'a+', encoding='utf-8')

# Scratch file for hotel information.  NOTE(review): opened here but never
# written in the visible portion of this file — confirm it is still needed.
path_jingdian_information = 'hotel_list_infor.txt'
jingdian_information_file = open(path_jingdian_information, 'a+', encoding='utf-8')

# Accumulators shared by the scraping functions; one entry per scraped hotel.
cat_id = []        # hotel id parsed out of the detail-page href
cat_name = []      # hotel name text
cat_distance = []  # distance text, " " when the page omits it
cat_score = []     # review score text, " " when the page omits it

def parse_city(start_url):
    """Fetch a city index page and crawl every hotel-category link on it.

    Finds the ``contbox box_padd`` container and, for each ``<li>`` whose
    ``data-type`` attribute is ``'2'`` (the hotel category), hands the link's
    href to :func:`city_page` for scraping.

    On any failure the *failing* URL is appended to the module-level error
    log so the crawl can be resumed.  (The original wrote the unrelated
    global ``url[0]`` here, which raised NameError when called standalone.)
    """
    try:
        response = requests.get(start_url, headers=headers,
                                allow_redirects=False, timeout=5)
        if response.status_code == 200:
            html = response.content.decode("utf-8")
            soup = BeautifulSoup(html, "html.parser")
            div = soup.find('div', class_='contbox box_padd')
            for ll in div.find_all('li'):
                # data-type == '2' marks the hotel-category entries.
                if ll.get('data-type') == '2':
                    city_page(ll.find('a').get('href'))
    except Exception:
        # Bug fix: log this function's own URL, not the global ``url[0]``.
        list_error_file.write(start_url + '\n')

def city_page(url2):
    """Scrape one hotel-list page, accumulate rows, follow pagination.

    For every ``<li>`` in the ``listbox`` container, extracts the hotel name,
    the id embedded in the href, the distance and the score (blank-space
    placeholders when absent), appends them to the module-level accumulator
    lists, then recurses into the "next page" link when present.

    After each call (including each recursive level as it unwinds) the full
    accumulated data set is re-dumped to ``hotel_information1.csv`` — kept
    from the original so a crash mid-crawl still leaves a usable snapshot.

    Failures are logged with the *failing* URL (the original wrote the
    unrelated global ``url[0]``).
    """
    try:
        time.sleep(1)  # polite crawl delay between page fetches
        response = requests.get(url2, headers=headers,
                                allow_redirects=False, timeout=5)
        if response.status_code == 200:
            html = response.content.decode("utf-8")
            soup = BeautifulSoup(html, "html.parser")
            div_list = soup.find('div', class_='listbox')
            for li in div_list.find_all('li'):
                if li.find('a') is not None:
                    name = li.find('a').get_text()
                    href = li.find('a').get('href')
                    # id is the tail of the second '-'-separated href segment;
                    # renamed from ``id`` to avoid shadowing the builtin.
                    hotel_id = href.split('-')[1][2:]
                if li.find('span', class_='sum_bold') is not None:
                    score = li.find('span', class_='sum_bold').get_text()
                else:
                    score = " "
                if li.find('div', class_='distance') is not None:
                    distance = li.find('div', class_='distance').get_text()
                else:
                    distance = " "
                print(name, hotel_id, distance, score)
                cat_id.append(hotel_id)
                cat_name.append(name)
                cat_distance.append(distance)
                cat_score.append(score)

            # Follow the "next page" link, if any, before this call returns.
            paging = soup.find('div', class_='b_paging')
            next_page = paging.find('a', class_='page next')
            if next_page is not None:
                city_page(next_page.get('href'))
        else:
            # Non-200 response: log this page's URL (was: global url[0]).
            list_error_file.write(url2 + '\n')
    except Exception:
        # Any parse/network error: log this page's URL (was: global url[0]).
        list_error_file.write(url2 + '\n')

    print(len(cat_id))
    print(len(cat_name))
    print(len(cat_distance))
    print(len(cat_score))
    food = pd.DataFrame({'hotel_id': cat_id,
                         'hotel_name': cat_name,
                         'hotel_distance': cat_distance,
                         'hotel_score': cat_score})
    food.to_csv('hotel_information1.csv', encoding='utf_8_sig')

        

if __name__ == '__main__':
    # Deliberately dated IE user-agent string used by the scraping functions
    # (they read ``headers`` as a module-level global).
    headers = {
        'User-Agent': 'Mozilla/4.0(compatible;MSIE 5.5;Windows NT)', }
    # Bug fix: the original opened list4.csv without ever closing the handle;
    # a context manager guarantees it is released.
    with open('list4.csv', 'r') as seed_file:
        for url in csv.reader(seed_file):
            start_url = url[0]  # first column of each row is the seed URL
            parse_city(start_url)


