# -*- coding: utf-8 -*-
"""
Created on Sun May  5 01:20:54 2019

@author: Hsieh
"""

import requests
from bs4 import BeautifulSoup
import csv
from multiprocessing import Queue
import  threading
import random
import time
import xlwt
import pymysql
import bs4
import urllib
import re
import pandas as pd
import json
import codecs

# Pool of desktop-browser User-Agent strings; one is picked at random per
# run so the scraper looks less like a bot.
User_Agent = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36",
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
]

# Request headers sent with every page fetch. The UA is chosen once, at
# module load; random.choice keeps working if the pool size changes
# (random.randint(0, 4) hard-coded the old length of 5).
HEADERS = {
    'User-Agent': random.choice(User_Agent),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
    'Accept-Encoding': 'gzip, deflate, br',
    'Cookie': '',
    'Connection': 'keep-alive',
    'Pragma': 'no-cache',
    'Cache-Control': 'no-cache'
}
# Text file collecting every URL that failed to download or parse, so the
# failures can be retried later. Opened in append mode to preserve entries
# from earlier runs. Explicit utf-8 avoids the platform-dependent default
# encoding (the URLs themselves are ASCII, so this is backward-compatible).
path_list_error = 'jingdian_content_errortry1.txt'
list_error_file = open(path_list_error, 'a+', encoding='utf-8')

# Input CSV: one scene-detail URL per row, URL in the first column.
# NOTE(review): assumed to be utf-8 encoded, matching the rest of the
# pipeline — confirm against how jingdian_list14.csv was produced.
csv_file = csv.reader(open('jingdian_list14.csv', 'r', encoding='utf-8'))

# Accumulators, one per output column; filled in lockstep by the crawl
# loop so index i across all eight lists describes the same scene.
cat_scene_name = []     # scene name (Chinese, ASCII stripped)
cat_scene_id = []       # numeric scene / POI id
cat_scene_score = []    # user rating
cat_scene_address = []  # street address
cat_scene_phone = []    # contact phone
cat_scene_href = []     # link found in the "history" section
cat_scene_img = []      # first image URL
cat_scene_city = []     # city from the breadcrumb trail


for url in csv_file:
    # Each CSV row carries the scene-detail URL in its first column.
    try:
        time.sleep(1)  # throttle requests: one page per second
        response = requests.get(url[0], headers=HEADERS,
                                allow_redirects=False, timeout=5)
        if response.status_code == 200:
            html = response.content.decode("utf-8")
            soup = BeautifulSoup(html, "html.parser")

            # Reset all fields to a blank placeholder on EVERY iteration.
            # Previously, a page missing a section silently appended the
            # variables left over from the preceding URL (or raised
            # NameError on the very first row), corrupting the output.
            scene_name = " "
            scene_id = " "
            scene_city = " "
            score = " "
            scene_address = " "
            scene_phone = " "
            div_a = " "
            div_img = " "

            # --- scene name and id ---------------------------------------
            div_title = soup.find('div', class_='b_title clrfix')
            if div_title is not None:
                tit = div_title.find('h1', class_='tit')
                if tit is not None:
                    # Strip ASCII letters/digits/apostrophes, keeping only
                    # the Chinese portion of the title.
                    scene_name = re.sub(r"[A-Za-z0-9']", "", tit.get_text())
                    print(scene_name)
                a_tag = div_title.find('a')  # may be absent: guard before .get
                if a_tag is not None:
                    if a_tag.get('data-id') is not None:
                        scene_id = a_tag.get('data-id')
                    else:
                        # Fall back to the first number embedded in the href.
                        digits = re.findall(r"\d+", a_tag.get('href') or "")
                        if digits:
                            scene_id = digits[0]
                    print(scene_id)

            # --- city from the breadcrumb trail --------------------------
            li_title = soup.find('div', class_='e_crumbs')
            if li_title is not None:
                # find_all returns a (possibly empty) list, never None, so
                # test its length; the old `is not None` check was always
                # true and scene_a[3] could raise IndexError, losing the row.
                scene_a = li_title.find_all(
                    'a', {'class': 'txtlink', 'data-beacon': 'Breadcrumb'})
                if len(scene_a) > 3:
                    scene_city = scene_a[3].get_text()
            print(scene_city)

            # --- user rating ---------------------------------------------
            div_score = soup.find('span', class_='cur_score')
            if div_score is not None:
                score = div_score.get_text()
                print(score)

            # --- address / phone from the <dl><dt><dd> info table --------
            link = {}
            td = soup.find('td', class_='td_l')
            if td is not None:
                key = None
                for dl in td.find_all('dl'):
                    dt = dl.find('dt')
                    if dt is not None:
                        key = dt.get_text()
                    dd = dl.find('dd')
                    # Guard against a <dd> appearing before any <dt>, which
                    # previously raised NameError and dropped the whole row.
                    if dd is not None and key is not None:
                        link[key] = dd.get_text()
            scene_address = link.get('地址:', " ")
            scene_phone = link.get('电话:', " ")
            print(scene_address)
            print(scene_phone)

            # --- href from the "history" section -------------------------
            div_history = soup.find('div', class_='history_list')
            if div_history is not None:
                div_span = div_history.find('span')
                if div_span is not None:
                    span_a = div_span.find('a')
                    if span_a is not None and span_a.get('href') is not None:
                        div_a = span_a.get('href')
            print(div_a)

            # --- first image URL -----------------------------------------
            div_focus = soup.find('div', class_='e_have_images clrfix')
            if div_focus is not None:
                div_imgbox = div_focus.find('li', class_='imgbox')
                if div_imgbox is not None:
                    img = div_imgbox.find('img')
                    if img is not None and img.get('src') is not None:
                        div_img = img.get('src')

            # Append all eight fields together so the columns stay aligned.
            cat_scene_name.append(scene_name)
            cat_scene_id.append(scene_id)
            cat_scene_score.append(score)
            cat_scene_address.append(scene_address)
            cat_scene_phone.append(scene_phone)
            cat_scene_href.append(div_a)
            cat_scene_img.append(div_img)
            cat_scene_city.append(scene_city)
        else:
            # Non-200 response (including redirects, which are not
            # followed): record the URL for a later retry pass.
            list_error_file.write(url[0] + '\n')
    except Exception:
        # Network error, decode failure, or unexpected page structure:
        # record the URL for retry instead of aborting the whole crawl.
        list_error_file.write(url[0] + '\n')
             
# Report how many values were collected per field (all eight counts must be
# equal — the crawl loop appends to every list together), then assemble the
# records into a DataFrame and export to CSV.
columns = {
    'jd_name': cat_scene_name,
    'jd_poi': cat_scene_id,
    'jd_score': cat_scene_score,
    'jd_address': cat_scene_address,
    'jd_phone': cat_scene_phone,
    'jd_href': cat_scene_href,
    'jd_img': cat_scene_img,
    'jd_city': cat_scene_city,
}
for field_values in columns.values():
    print(len(field_values))

city = pd.DataFrame(columns)
# utf_8_sig writes a BOM so Excel opens the Chinese text correctly.
city.to_csv('group_jd9.csv', encoding='utf_8_sig')

# Flush and close the error log now that the crawl is finished; the
# original script leaked the handle and relied on interpreter exit.
list_error_file.close()