# -*- coding: utf-8 -*-
"""
Created on Sat May  4 10:00:29 2019

@author: Hsieh
"""

import requests
from bs4 import BeautifulSoup
import csv
from multiprocessing import Queue
import  threading
import random
import time
import xlwt
import pymysql
import bs4
import urllib
import re
import pandas as pd
import json
import codecs

# Pool of desktop-browser User-Agent strings; one is picked at random so the
# scraper looks less like a bot to the target site.
User_Agent = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36",
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
]

# Default headers for the POI list-page requests.  random.choice replaces the
# former random.randint(0, 4), which hard-coded the pool size and would break
# silently if the pool changed length.  Cookie is intentionally left blank.
HEADERS = {
    'User-Agent': random.choice(User_Agent),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
    'Accept-Encoding': 'gzip, deflate, br',
    'Cookie': '',
    'Connection': 'keep-alive',
    'Pragma': 'no-cache',
    'Cache-Control': 'no-cache'
}

# --- output/error files and accumulators -----------------------------------
# Append-mode error logs, kept open for the duration of the crawl and closed
# after the final CSV is written.  Explicit UTF-8 avoids platform-default
# encoding surprises when a logged URL contains non-ASCII characters.
path_list_error = 'user_comment_error13.txt'  # URLs whose POI page failed to fetch/parse
list_error_file = open(path_list_error, 'a+', encoding='utf-8')

path_jingdian_information = 'user_comment_inform13.txt'  # comment-API URLs that failed
jingdian_information_file = open(path_jingdian_information, 'a+', encoding='utf-8')

# Read all POI URLs up front so the CSV handle is closed immediately (the
# original csv.reader(open(...)) leaked the handle for the whole run).
# 'utf-8-sig' transparently strips a BOM if the CSV was written with one.
with open('group_userlist13.csv', 'r', encoding='utf-8-sig') as _f:
    csv_file = list(csv.reader(_f))

# Per-comment accumulators; all six grow in lockstep (one entry per comment)
# and are assembled into user_comment13.csv at the end of the script.
cat_user_id = []
cat_user_name = []
cat_jd_poi = []
cat_jd_name = []
cat_score = []
cat_user_pic = []

# Main crawl: for each POI page URL from the CSV, extract the POI id and name,
# then fetch its comment pages from the Qunar comment API and accumulate one
# row per comment into the cat_* lists.  Failures are logged to the two error
# files so they can be retried later.
for url in csv_file:
    try:
        time.sleep(1)  # throttle list-page requests to avoid being blocked
        response = requests.get(url[0], headers=HEADERS,
                                allow_redirects=False, timeout=5)
        if response.status_code != 200:
            # Non-200 response: record the URL for a later retry pass.
            list_error_file.write(url[0] + '\n')
            continue

        html = response.content.decode("utf-8")
        soup = BeautifulSoup(html, "html.parser")
        div_title = soup.find('div', class_='b_title clrfix')
        if div_title is None:
            # Unrecognised page layout: treat like a fetch failure.
            # (The original fell through to a NameError on scene_id and
            # relied on the outer except to log the same URL.)
            list_error_file.write(url[0] + '\n')
            continue

        # POI (scenic-spot) id: prefer the anchor's data-id attribute, fall
        # back to the first run of digits embedded in its href.
        anchor = div_title.find('a')
        scene_id = anchor.get('data-id')
        if scene_id is None:
            scene_id = re.findall(r"\d+", anchor.get('href'))[0]
        print(scene_id)

        # POI display name: strip ASCII letters/digits/apostrophes, keeping
        # the CJK name text.
        scene = div_title.find('h1', class_='tit').get_text()
        jd_name = re.sub(r"[A-Za-z0-9\']", "", scene)
        print(jd_name)

        api_base = ('http://travel.qunar.com/place/api/html/comments/poi/'
                    + scene_id
                    + '?poiList=true&sortField=1&rank=0&pageSize=50&page=')
        for i in range(1, 2):  # comment page number (only page 1 for now)
            url1 = api_base + str(i)
            try:
                time.sleep(2)  # throttle API requests
                json_str = requests.get(
                    url1,
                    headers={'User-Agent': random.choice(User_Agent)}).text
                # The API wraps an HTML fragment in JSON under 'data'.
                json_data = json.loads(json_str)['data']
                bs_obj = BeautifulSoup(json_data, 'lxml')
                items = bs_obj.find_all('li', {'class': 'e_comment_item clrfix'})

                for item in items:  # one <li> per comment
                    # Avatar URL (blank when the user has no picture).
                    user_pic = item.find(
                        'div', {'class': 'e_comment_usr_pic'}).find('img')
                    user_pic_name = user_pic.get('src') if user_pic is not None else " "

                    # User display name (blank when no <a> is present).
                    user = item.find(
                        'div', {'class': 'e_comment_usr_name'}).find('a')
                    user_name = user.text if user is not None else " "

                    # User id: anonymous users link to "javascript:;", so the
                    # id is recovered from the <li> element id instead of the
                    # href.  The original called user.get('href') even when
                    # user was None, which raised AttributeError and silently
                    # aborted the whole page via the bare except below.
                    if user is None or user.get('href') == "javascript:;":
                        use_id = ''.join(c for c in item.get('id') if c.isdigit())
                    else:
                        use_id = ''.join(c for c in user.attrs['href'] if c.isdigit())

                    # Rating: the digits embedded in the total_star inner span
                    # markup (presumably a width-style percentage -- TODO confirm).
                    score = ''.join(
                        c for c in str(item.find('span', {'class': 'total_star'}).find('span'))
                        if c.isdigit())

                    cat_user_name.append(user_name)
                    cat_jd_poi.append(scene_id)
                    cat_user_id.append(use_id)
                    cat_score.append(score)
                    cat_jd_name.append(jd_name)
                    cat_user_pic.append(user_pic_name)
                    print(jd_name, scene_id, user_name, use_id, user_pic_name, score)
            except Exception:
                # Comment-page fetch or parse failed: log the API URL.
                jingdian_information_file.write(url1 + '\n')

    except Exception:
        # Any other failure for this POI: log the original list URL.
        list_error_file.write(url[0] + '\n')

# --- persist the scraped comments ------------------------------------------
# Sanity check: all six accumulators should have the same length
# (one entry per comment).
print(len(cat_user_id))
print(len(cat_user_name))
print(len(cat_jd_poi))
print(len(cat_score))
print(len(cat_jd_name))
print(len(cat_user_pic))

# utf_8_sig writes a BOM so Excel opens the Chinese text correctly.
city = pd.DataFrame({
    'jd_name': cat_jd_name,
    'jd_poi': cat_jd_poi,
    'user_name': cat_user_name,
    'user_id': cat_user_id,
    'score': cat_score,
    'pic_href': cat_user_pic,
})
city.to_csv('user_comment13.csv', encoding='utf_8_sig')

# Close the error logs now that the crawl is finished; the original leaked
# both handles, risking loss of buffered error lines on interpreter exit.
list_error_file.close()
jingdian_information_file.close()
