import requests
import json
from pyquery import PyQuery as jq
from urllib.parse import quote
from urllib.parse import urljoin
import pandas as pd
from retry import retry
import pickle

# Positional column headers for the output DataFrame written in weibo.start():
# 0=title, 1=id, 2=link, 3=comment count, 4=comment text (labels are Chinese).
datacol = {0:'标题',1:'id',2:'链接',3:'评论数量',4:'评论内容'}
# Browser-like request headers copied from a captured session, parsed from a
# raw JSON literal so the embedded quotes/percent-escapes need no escaping.
# NOTE(review): the Cookie (and its embedded session tokens) is hard-coded and
# will expire; requests may start failing once it does — refresh before runs.
headers = json.loads(r'''{
    "Accept": "*/*",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Connection": "keep-alive",
    "Cookie": "_abtest_userid=05b8cbd2-12ce-4d1d-ab72-30e2519dd5e6; _RSG=urqs2Zl1bC5UIWMpopIQm8; _RDG=283c65b32d361e275d1c66ab8baabdcab9; _RGUID=532506f8-c2db-4d78-8c1a-4b3cb35a049e; _ga=GA1.2.1201612365.1529504937; MKT_Pagesource=PC; StartCity_Pkg=PkgStartCity=2; _RF1=115.216.125.253; Session=smartlinkcode=U130026&smartlinklanguage=zh&SmartLinkKeyWord=&SmartLinkQuary=&SmartLinkHost=; Union=AllianceID=4897&SID=130026&OUID=&Expires=1530283651278; _gid=GA1.2.848932296.1529678851; ASP.NET_SessionSvc=MTAuMTUuMTI4LjI0fDkwOTB8b3V5YW5nfGRlZmF1bHR8MTUyNjU0NjUyNzQ2MQ; appFloatCnt=4; manualclose=1; LatelySearch=%e5%91%a8%e8%be%b9%5e0%5eU%5e%e5%91%a8%e8%be%b9%7c%e8%8b%8f%e5%b7%9e%5e0%5eU%5e%e8%8b%8f%e5%b7%9e%7c%e8%8b%8f%e5%b7%9e%e4%b8%80%e6%97%a5%e6%b8%b8%5e0%5eU%5e%e8%8b%8f%e5%b7%9e%e4%b8%80%e6%97%a5%e6%b8%b8%7c; _bfa=1.1529504934107.2m67e1.1.1529504934107.1529678848466.2.15; _bfs=1.12; Mkt_UnionRecord=%5B%7B%22aid%22%3A%224897%22%2C%22timestamp%22%3A1529679665785%7D%5D; _jzqco=%7C%7C%7C%7C%7C1.738109619.1529504937116.1529679562389.1529679665832.1529679562389.1529679665832.0.0.0.15.15; __zpspc=9.2.1529678851.1529679665.12%232%7Cwww.baidu.com%7C%7C%7C%7C%23; _bfi=p1%3D350010%26p2%3D350010%26v1%3D15%26v2%3D14",
    "Host": "huodong.ctrip.com",
    "Referer": "http://huodong.ctrip.com/dailytour/search/?keyword=%25e8%258b%258f%25e5%25b7%259e%25e4%25b8%2580%25e6%2597%25a5%25e6%25b8%25b8&filters=s13",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36",
    "X-Requested-With": "XMLHttpRequest"
}''')

class weibo():
	"""Scraper for Suzhou one-day-tour listings and their user comments.

	NOTE(review): despite the class name, this targets huodong.ctrip.com,
	not Weibo. Results accumulate in two places:
	  - self.mylist: rows for the output Excel sheet. Listing rows are
	    [title, id, url, comment-count-text]; comment rows are
	    ['', '', '', '', comment-text].
	  - self.content: all comment text concatenated (pickled at the end).
	"""

	# Titles containing any of these are skipped (neighbouring cities).
	_EXCLUDE = ('杭州', '上海', '西湖', '无锡')
	# Titles must contain at least one of these Suzhou-related place names.
	_INCLUDE = ('苏州', '拙政园', '狮子林', '周庄', '同里',
	            '虎丘', '寒山寺', '甪直', '留园', '沙家浜')

	def __init__(self):
		self.mylist = []
		self.content = ''
		# Search-results endpoint (listing pages).
		self.lxurl = "http://huodong.ctrip.com/Activity-Booking-OnlineWebSite/Search/dailytour/List"
		# User-comments endpoint (now actually used by getContent; the
		# original defined it but hard-coded the same URL inline).
		self.plurl = "http://huodong.ctrip.com/Activity-Booking-OnlineWebSite/Recommend/UserComments"

	@retry(delay=5, max_delay=60, backoff=15)
	def getLuxian(self, page):
		"""Fetch one search-results page, record qualifying listings, and
		page through each listing's comments via getContent.

		Parameters:
			page: 1-based search-results page index.
		"""
		print(f"正在爬第{page}页数据")
		params = {
			'SearchText': '苏州',
			'SearchType': 'U',
			'FilterPara': f's13p{page}',
		}
		resp = requests.get(self.lxurl, params=params, headers=headers)
		for anchor in jq(resp.text)('a').items():
			name = anchor('h2').text()
			# Guard clauses replace the original nested if/else pyramid.
			if any(word in name for word in self._EXCLUDE):
				continue
			if not any(word in name for word in self._INCLUDE):
				continue
			dianping = anchor('.product_r > .product_info > .product_db').text()
			url = urljoin("http://huodong.ctrip.com/", anchor.attr('href'))
			# Product id is embedded in the URL: .../activity/<id>.<ext>
			ids = url.split('activity/')[1].split('.')[0]
			productName = quote(name)
			self.mylist.append([name, ids, url, dianping])
			print(f"正在爬{name} : ids = {ids}")
			if '无' in dianping:
				# "无点评" — nothing to fetch.
				continue
			# Blurb is either "新产品，N条…" or "N条…"; extract N.
			if '新产品' in dianping:
				count_text = dianping.split('新产品，')[1].split('条')[0]
			else:
				count_text = dianping.split('条')[0]
			count = int(count_text)
			# BUG FIX: the original only paged through comments in the
			# non-"新产品" branch, so "新产品" listings parsed their count
			# but never fetched any comments. Both branches now page.
			pages = -(-count // 10)  # ceil(count / 10); 10 comments per page
			print(pages)
			for idx in range(1, pages + 1):
				self.getContent(ids, productName, idx)

	@retry(delay=5, max_delay=60, backoff=15)
	def getContent(self, ids, productName, page):
		"""Fetch one page of user comments for a listing and store them.

		Parameters:
			ids:         product id extracted from the listing URL.
			productName: percent-encoded listing title (already quoted by
			             the caller, so it is embedded as-is — passing it
			             through requests' params would double-encode it).
			page:        1-based comment-page index.
		"""
		print(f"第{page}页评论")
		# BUG FIX: the original built a params dict and then ignored it,
		# hand-building the query string against a hard-coded copy of
		# self.plurl. The dead dict is gone and the endpoint is reused.
		resp = requests.get(
			f"{self.plurl}?id={ids}&productName={productName}&pageSize=10&pageIndex={page}",
			headers=headers)
		html = resp.text
		if html != '':
			for item in jq(html)('li').items():
				text = item('.ticket_user_left').remove('*').text()
				if text != '':
					self.mylist.append(['', '', '', '', text])
					self.content = self.content + text

	def start(self):
		"""Crawl search pages 1-39, then dump rows to Excel and pickle the
		concatenated comment text."""
		for page in range(1, 40):
			self.getLuxian(page)

		df = pd.DataFrame(self.mylist)
		df.rename(columns=datacol, inplace=True)
		df.to_excel('weibo.xlsx')

		with open('content.ekl', 'wb') as f:
			pickle.dump(self.content, f)



if __name__ == '__main__':
	# Run the full crawl when executed as a script.
	spider = weibo()
	spider.start()


