from datetime import datetime, timedelta
from selenium import webdriver
import json
import time
import requests
import sys
import re
import os
import csv
sys.setrecursionlimit(20000)		# raise the recursion-depth limit: blog.retweets/blog.comments paginate recursively, one frame per page


class weibo(object):
	"""Crawler for Weibo user profiles, followings (idols), fans and posts via the m.weibo.cn container API."""

	def __init__(self, config):
		"""Initialize from a config dict.

		Expected keys: user_ids (list), since_date / end_date ('%Y-%m-%d' strings,
		inclusive window for post collection), write_formats (subset of
		'json'/'csv'/'txt'), with_idols / with_fans / with_blogs (bools).
		"""
		self.user_ids = config['user_ids']
		# Inclusive crawl window; parsed to midnight datetimes.
		self.since_date = datetime.strptime(config['since_date'], '%Y-%m-%d')
		self.end_date = datetime.strptime(config['end_date'], '%Y-%m-%d')
		self.write_formats = config['write_formats']		# output formats to emit
		self.with_idols = config['with_idols']		# crawl the user's followings?
		self.with_fans = config['with_fans']		# crawl the user's fans?
		self.with_blogs = config['with_blogs']		# crawl the user's posts?

	def get_index_json(self, params):
		"""Fetch the getIndex API and return the parsed JSON dict."""
		url = 'https://m.weibo.cn/api/container/getIndex?'
		# timeout keeps an unresponsive server from hanging the crawl forever;
		# a Timeout is an Exception, so existing retry handling still applies.
		return requests.get(url, params=params, timeout=30).json()

	def get_second_json(self, params):
		"""Fetch the getSecond API (followings/fans pages) and return the parsed JSON dict."""
		url = 'https://m.weibo.cn/api/container/getSecond?'
		return requests.get(url, params=params, timeout=30).json()

	def _retry_get(self, getter, params):
		"""Call getter(params); on any failure wait 5 minutes and try once more.

		Mirrors the original best-effort policy: a second failure propagates.
		"""
		try:
			return getter(params)
		except Exception:
			print('获取得太快了哦，休息5分钟。。。')
			time.sleep(300)
			return getter(params)

	def print_user(self, i, user):
		"""Pretty-print one crawled user profile to stdout."""
		print('=====================================')
		print('当前是第 ' + str(i) + ' 个微博用户：')
		print('用户id：' + str(user['user_id']))
		print('用户名称：' + user['user_name'])
		print('用户介绍：' + user['user_description'])
		if user['user_v']:
			print('用户认证：' + user['user_v_reason'])
		print('用户性别：' + user['user_gender'])
		print('粉丝数：' + str(user['user_fan']))
		print('关注数：' + str(user['user_idol']))
		print('发文量：' + str(user['user_blog']))

	def user(self, i, user_id):
		"""Crawl one user's profile; returns {} when the id does not exist."""
		user = {}
		params = {'containerid': '100505' + str(user_id)}		# profile container
		user_dict = self._retry_get(self.get_index_json, params)
		if user_dict['ok']:		# user id exists
			info = user_dict['data']['userInfo']
			user['user_id'] = info['id']
			user['user_name'] = info['screen_name']
			user['user_description'] = info['description']
			user['user_v'] = info['verified']
			# Only verified accounts carry a verification reason.
			user['user_v_reason'] = info['verified_reason'] if user['user_v'] else ""
			user['user_gender'] = info['gender']
			user['user_fan'] = info['followers_count']
			user['user_idol'] = info['follow_count']
			user['user_blog'] = info['statuses_count']
			self.print_user(i, user)
			time.sleep(2)		# be polite to the API
		else:		# user id not found
			print('当前是第 ' + str(i) + ' 个微博用户：')
			print('未找到该用户信息！')
		return user

	def _crawl_relations(self, i, user_id, list_type, prefix, label):
		"""Page through a FOLLOWERS/FANS list and collect entries.

		list_type: containerid suffix ('_-_FOLLOWERS' or '_-_FANS').
		prefix: key prefix for each entry ('idol' or 'fan').
		label: Chinese label used in progress messages.
		Returns {uid: {prefix_name, prefix_description, ...}}.
		"""
		print('-------------------------------------')
		print('开始采集-用户' + str(i) + '-' + label + '：')
		relations = {}
		page = 1		# next page to request
		max_page = sys.maxsize		# unknown until the first response
		while page <= max_page:
			params = {'containerid': '100505' + str(user_id) + list_type, 'page': page}
			data_dict = self._retry_get(self.get_second_json, params)
			if not data_dict['ok']:
				break
			info = data_dict['data']
			max_page = info['maxPage']
			page = info['cardlistInfo']['page']		# API reports the next page to fetch
			for card in info['cards']:
				if card['card_type'] != 10:		# 10 == user card
					continue
				u = card['user']
				# Key order matters: write_csv derives its header from the first entry.
				relations[u['id']] = {
					prefix + '_name': u['screen_name'],
					prefix + '_description': u['description'],
					prefix + '_v': u['verified'],
					prefix + '_v_reason': u['verified_reason'] if u['verified'] else "",
					prefix + '_gender': u['gender'],
					prefix + '_fan': u['followers_count'],
					prefix + '_idol': u['follow_count'],
					prefix + '_blog': u['statuses_count'],
				}
			print('用户' + str(i) + '-共有' + label + str(max_page) + '页，当前已完成' + str(page - 1) + '页')
			time.sleep(2)		# throttle between pages
		print('用户' + str(i) + '-' + label + '已采集完成！')
		return relations

	def idols(self, i, user_id):
		"""Crawl the users this user follows."""
		return self._crawl_relations(i, user_id, '_-_FOLLOWERS', 'idol', '关注')

	def fans(self, i, user_id):
		"""Crawl this user's fans."""
		return self._crawl_relations(i, user_id, '_-_FANS', 'fan', '粉丝')

	def date_standardization(self, publish_date):
		"""Convert Weibo's relative/short date strings to a midnight datetime.

		Fix: every branch is normalized to midnight so window comparisons
		against since_date/end_date (both midnight) behave consistently.
		Previously the relative branches ("刚刚", "N分钟", "N小时", "昨天")
		kept the current time-of-day, which silently excluded same-day
		posts whenever end_date equals today.
		"""
		now = datetime.now()
		if "刚刚" in publish_date:
			result = now
		elif "分钟" in publish_date:
			minutes = int(publish_date[:publish_date.find("分钟")])
			result = now - timedelta(minutes=minutes)
		elif "小时" in publish_date:
			hours = int(publish_date[:publish_date.find("小时")])
			result = now - timedelta(hours=hours)
		elif "昨天" in publish_date:
			result = now - timedelta(days=1)
		elif publish_date.count('-') == 1:
			# 'MM-DD' form means the current year.
			result = datetime.strptime(now.strftime("%Y") + "-" + publish_date, '%Y-%m-%d')
		else:
			result = datetime.strptime(publish_date, '%Y-%m-%d')
		return result.replace(hour=0, minute=0, second=0, microsecond=0)

	def get_long(self, blog_id):
		"""Fetch the full text of a long (truncated) post from its detail page.

		Returns '' when the embedded JSON cannot be located or parsed out.
		"""
		url = 'https://m.weibo.cn/detail/' + blog_id
		try:
			html = requests.get(url, timeout=30).text
		except Exception:
			print('获取得太快了哦，休息5分钟。。。')
			time.sleep(300)
			html = requests.get(url, timeout=30).text
		# The detail page embeds the post as an inline JS object; slice out
		# the '"status": {...}' fragment and parse it as JSON.
		html = html[html.find('"status":'):]
		html = html[:html.rfind('"hotScheme"')]
		html = html[:html.rfind(',')]
		html_json = json.loads('{' + html + '}', strict=False).get('status')
		html_text = html_json['text'] if html_json else ''
		time.sleep(1.5)		# throttle detail-page fetches
		return html_text

	def print_blog(self, i, blog_dict):
		"""Pretty-print one crawled post to stdout."""
		print('用户' + str(i) + '-' + blog_dict['blog_status'] + '\t' + blog_dict['blog_date'])
		print(blog_dict['blog_text'])
		print('转发：' + str(blog_dict['blog_repost']) + '\t' +
			  '评论：' + str(blog_dict['blog_comment']) + '\t' +
			  '点赞：' + str(blog_dict['blog_attitude']))
		print('-----------------')

	def blogs(self, i, user_id):
		"""Crawl posts within [since_date, end_date]; pinned posts are skipped.

		Relies on the feed being reverse-chronological: once a post older than
		since_date appears, the crawl stops.
		"""
		print('-------------------------------------')
		print('开始采集-用户' + str(i) + '-博文：')
		time_n = datetime.now()
		blogs = {}
		page = 1
		reached_past = False		# set once a post older than since_date is seen
		while True:
			params = {'containerid': '107603' + str(user_id), 'page': page}
			blog_dict = self._retry_get(self.get_index_json, params)
			if not blog_dict['ok']:
				break
			for card in blog_dict['data']['cards']:
				if card['card_type'] != 9:		# 9 == post card
					continue
				mblog = card['mblog']
				if 'isTop' in mblog:		# pinned post breaks chronological order: skip
					continue
				publish_date = self.date_standardization(mblog['created_at'])
				if publish_date < self.since_date:
					reached_past = True		# everything after this is older still
					break
				if publish_date > self.end_date:		# newer than the window: skip
					continue
				blog_id = mblog['id']
				entry = {'blog_date': publish_date.strftime('%Y-%m-%d')}
				entry['blog_status'] = '转载' if 'retweeted_status' in mblog else '原创'
				blog_text = mblog['text']
				if mblog['isLongText']:		# truncated post: fetch the full text
					html_text = self.get_long(blog_id)
					if html_text:		# keep the short text if the detail page fails
						blog_text = html_text
				blog_text = re.sub('<[^<]+?>', '', blog_text)		# strip HTML tags
				entry['blog_text'] = blog_text.replace('\n', '').strip()
				entry['blog_repost'] = mblog['reposts_count']
				entry['blog_comment'] = mblog['comments_count']
				entry['blog_attitude'] = mblog['attitudes_count']
				blogs[blog_id] = entry
				self.print_blog(i, entry)
			if reached_past:
				break
			page += 1
			# Throttle: guarantee at least 2 seconds between page requests.
			time_l = time_n
			time_n = datetime.now()
			if (time_n - time_l).seconds < 2:
				time.sleep(2)
		print('用户' + str(i) + '-博文已采集完成！')
		return blogs

	def _data_path(self, user_id, file_name, ext):
		"""Build the output path data/weibo/<id>/<id>-<file_name>.<ext>."""
		return "data/weibo/" + str(user_id) + "/" + str(user_id) + "-" + file_name + "." + ext

	def write_json(self, user_id, data, file_name):
		"""Dump data as JSON; `with` guarantees the handle is closed on error."""
		with open(self._data_path(user_id, file_name, "json"), "w", encoding='utf8') as fw:
			# ensure_ascii=False keeps Chinese text readable instead of \uXXXX escapes.
			json.dump(data, fw, indent=4, ensure_ascii=False)

	def write_csv(self, user_id, data, file_name):
		"""Dump a dict-of-dicts as CSV with an 'id' column; empty data yields an empty file."""
		with open(self._data_path(user_id, file_name, "csv"), "w", encoding='utf8', newline='') as fw:
			writer = csv.writer(fw)
			if data:
				# Header comes from the first entry's keys.
				keys = list(next(iter(data.values())).keys())
				writer.writerow(['id'] + keys)
				for k, v in data.items():
					writer.writerow([k] + [v[key] for key in keys])

	def write_txt(self, user_id, data, file_name):
		"""Dump 'key：value' lines (used only for the flat user profile)."""
		with open(self._data_path(user_id, file_name, "txt"), "w", encoding='utf8') as fw:
			for k, v in data.items():
				fw.write(str(k) + '：' + str(v) + '\n')

	def write_file(self, user_id, data, file_name):
		"""Dispatch one result dict to every configured output format."""
		os.makedirs('data/weibo/' + str(user_id) + '/', exist_ok=True)
		if 'json' in self.write_formats:
			self.write_json(user_id, data, file_name)
		# CSV needs a dict-of-dicts, so the flat user profile is excluded.
		if 'csv' in self.write_formats and file_name in ['idols', 'fans', 'blogs']:
			self.write_csv(user_id, data, file_name)
		# TXT is only meaningful for the flat user profile.
		if 'txt' in self.write_formats and file_name == 'user':
			self.write_txt(user_id, data, file_name)

	def start(self, weibo_index=0):
		"""Entry point: crawl every configured user, starting at weibo_index."""
		user_ids = self.user_ids
		for i in range(weibo_index, len(user_ids)):
			user_id = str(user_ids[i])
			result = {'user': self.user(i, user_id)}
			if result['user']:		# skip sub-crawls for nonexistent ids
				if self.with_idols:
					result['idols'] = self.idols(i, user_id)
				if self.with_fans:
					result['fans'] = self.fans(i, user_id)
				if self.with_blogs:
					result['blogs'] = self.blogs(i, user_id)
			print('正在写入文件。。。')
			for key, value in result.items():
				self.write_file(user_id, value, key)
			print('成功写入文件！')


class blog(object):
	"""Selenium-based crawler for a single post's reposts and comments.

	NOTE(review): uses the find_element_by_* API that was removed in
	Selenium 4, so this requires Selenium 3.x — confirm the pinned version.
	"""

	def __init__(self, config):
		"""Initialize the blog crawler from a config dict."""
		self.blog_urls = config['blog_urls']
		self.with_retweets = config['with_retweets']		# whether to crawl reposts
		self.with_comments = config['with_comments']		# whether to crawl comments

	def retweets(self, i, blog_url, browser):
		"""Crawl repost info for one post; returns {reposter_id: [repost texts]}."""
		def next_page(page):
			"""Recursively crawl the next repost page (fills the outer `retweets` dict)."""
			time.sleep(2)
			try:
				browser.find_element_by_css_selector('.W_icon.icon_warnB')		# page shows a "no reposts yet" notice
				return retweets
			except Exception:
				cards = browser.find_elements_by_class_name('list_con')		# list of repost entries
				for card in cards:		# iterate the repost entries
					# reposter id lives in the first <a>'s usercard attribute ('id=<uid>'; [3:] drops 'id=')
					retweeter_id = card.find_element_by_class_name('WB_text').find_elements_by_tag_name('a')[0].get_attribute('usercard')[3:]
					if retweeter_id not in retweets.keys():		# a user may repost more than once
						retweets[retweeter_id] = []
					retweet = card.find_element_by_class_name('WB_text').text		# repost text
					# NOTE(review): this `i` shadows the outer blog-index parameter
					i = retweet.find('：')
					retweet = retweet[i + 1:]		# drop the leading '<name>：' prefix
					retweets[retweeter_id].append(retweet)
				time.sleep(0.5)
				try:		# check whether a pagination widget exists
					pages = browser.find_element_by_class_name('W_pages').find_elements_by_tag_name('a')
				except Exception:
					pages = ''
				if pages == '': # only one page of reposts
					return retweets
				for a in pages:
					try:
						a_attrs = a.get_attribute("action-data")
						if a_attrs:
							page_next = int(a_attrs.split('=')[-1])
							if page_next == (page + 1):		# does the next page exist?
								a.click()
								page = page_next
								next_page(page)
					except Exception:
						continue
				return retweets

		print('开始采集-博文' + str(i) + '-转发信息。。。')
		url = blog_url + '?type=repost'		# repost-tab url of the post
		browser.get(url)
		page = 1
		retweets = {}
		retweets = next_page(page)
		print(retweets)
		print('博文' + str(i) + '-转发信息已采集完成！')
		return retweets

	def comments(self, i, blog_url, browser):
		"""Crawl comment info for one post; returns {commenter_id: [comment texts]}."""
		def next_page(page):
			"""Recursively click 'load more' until every comment is loaded."""
			if page == 1:		# first entry into the comment page
				time.sleep(3)
				# click "all" in the tab bar to show every comment
				time_index = browser.find_elements_by_class_name('tab')[-2].find_elements_by_tag_name('a')[-1]
				time_index.click()
				time.sleep(3)
				browser.execute_script('window.scrollTo(0,10000)')		# scroll down until the load-more button appears
				time.sleep(1)
				browser.execute_script('window.scrollTo(0,10000)')
				time.sleep(1)
				browser.execute_script('window.scrollTo(0,10000)')
			time.sleep(1)
			try:
				loads = browser.find_element_by_css_selector('.WB_cardmore.S_txt1.S_line1.clearfix')		# locate the load-more button
			except Exception:
				return
			loads.click() # click load-more
			time.sleep(1)
			try:
				browser.find_element_by_css_selector('.WB_cardmore.S_txt1.S_line1.clearfix') # look for the next load-more button
			except Exception:
				return # nothing more to load
			page += 1
			next_page(page)

		def get_comments():
			"""Collect every loaded comment from the DOM."""
			cards = browser.find_element_by_class_name('repeat_list').find_element_by_class_name(
				'list_box').find_elements_by_css_selector('.list_li.S_line1.clearfix')		# list of comment entries
			comments = {}
			for card in cards:		# iterate the comment entries
				# commenter id lives in the first <a>'s usercard attribute ('id=<uid>'; [3:] drops 'id=')
				commentater_id = card.find_element_by_class_name('WB_text').find_elements_by_tag_name('a')[0].get_attribute('usercard')[3:]
				if commentater_id not in comments.keys():		# a user may comment more than once
					comments[commentater_id] = []
				comment = card.find_element_by_class_name('WB_text').text		# comment text
				# NOTE(review): this `i` shadows the outer blog-index parameter
				i = comment.find('：')
				comment = comment[i + 1:]		# drop the leading '<name>：' prefix
				comments[commentater_id].append(comment)
			return comments

		print('开始采集-博文' + str(i) + '-评论信息。。。')
		url = blog_url + '?type=comment'		# comment-tab url of the post
		browser.get(url)
		page_now = 1
		next_page(page_now)
		comments = get_comments()
		print('博文' + str(i) + '-评论信息已采集完成！')
		return comments

	def write_file(self, blog_xid, data, file_name):
		"""Write one result dict as JSON under data/blog/<blog_xid>/."""
		if not os.path.isdir('data/blog/' + blog_xid + '/'):
			os.makedirs('data/blog/' + blog_xid + '/')
		fw = open("data/blog/" + blog_xid + "/" + file_name + ".json", "w", encoding='utf8')
		json.dump(data, fw, indent=4)
		fw.close()

	def start(self, blog_index=0):
		"""Entry point: crawl every configured post url, starting at blog_index."""
		if not self.with_retweets and not self.with_comments:
			print('请在获取转发或获取评论中至少选择一项功能！')
			return
		browser = webdriver.Firefox()		# launch the browser
		browser.get('https://weibo.com/')		# open the Weibo homepage first: viewing full post info requires login
		time.sleep(30)		# leave 30 seconds for a manual login
		blog_urls = self.blog_urls
		for i in range(blog_index, len(blog_urls)):
			blog_url = str(blog_urls[i]).split('?')[0]
			result = {}
			# <uid>-<post id>, built from the last two url segments
			blog_xid = blog_url.split('/')[-2] + '-' + blog_url.split('/')[-1]
			if self.with_retweets:
				result['retweets'] = self.retweets(i, blog_url, browser)
			if self.with_comments:
				result['comments'] = self.comments(i, blog_url, browser)
			print('正在写入文件。。。')
			for key in result.keys():
				self.write_file(blog_xid, result[key], key)
			print('成功写入文件！')
		browser.quit()