#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 程序功能: 按关键字爬取微博清单
import os
import re  # 正则表达式提取文本
from jsonpath import jsonpath  # 解析json数据
import requests  # 发送请求
import pandas as pd  # 存取csv文件
import datetime  # 转换时间用
import json
from time import sleep
import random



# Shared request headers: a mobile (Android Chrome) User-Agent so that
# m.weibo.cn serves the mobile JSON API instead of the desktop site.
headers = {
	"User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Mobile Safari/537.36",
	"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
	"accept-encoding": "gzip, deflate, br",
}


def trans_time(v_str):
	"""Convert a Weibo-style GMT timestamp to 'YYYY-MM-DD HH:MM:SS'.

	:param v_str: timestamp such as 'Fri Dec 02 10:30:00 +0800 2022'
	:return: the timestamp's wall-clock time as a formatted string
	"""
	# %z accepts any UTC offset, generalizing the previously hard-coded
	# '+0800' (which raised ValueError for every other offset). The wall-clock
	# fields are unchanged by parsing, so output is identical for +0800 input.
	GMT_FORMAT = '%a %b %d %H:%M:%S %z %Y'
	timeArray = datetime.datetime.strptime(v_str, GMT_FORMAT)
	return timeArray.strftime("%Y-%m-%d %H:%M:%S")


import requests
import re


def getLongText(v_id):
	"""Fetch the full text of a long Weibo post.

	:param v_id: status id of the post
	:return: the post's full text with HTML tags stripped, or None on failure
	"""
	url = 'https://m.weibo.cn/statuses/extend?id=' + str(v_id)
	headers = {
		'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
	}

	try:
		# Send the request; a timeout prevents a stalled connection from
		# hanging the crawler forever (previously there was none).
		r = requests.get(url, headers=headers, timeout=10)
		r.raise_for_status()  # raise for HTTP error status codes
		json_data = r.json()

		# Extract the long post content.
		# NOTE(review): the 'test_data' key is unusual — the public m.weibo.cn
		# API normally nests the payload under 'data'; confirm against a live
		# response before relying on this.
		long_text = json_data['test_data']['longTextContent']

		# Strip HTML tags from the raw content with a regex.
		dr = re.compile(r'<[^>]+>', re.S)
		long_text2 = dr.sub('', long_text)

		print(long_text2)
		return long_text2

	except requests.exceptions.RequestException as e:
		print(f"请求失败: {e}")
		return None
	except KeyError as e:
		print(f"解析JSON失败，未找到所需字段: {e}")
		return None
	except Exception as e:
		print(f"发生未知错误: {e}")
		return None

# def remove_hashtags(text):
#     # 使用正则表达式匹配所有被##包裹的内容，并将其替换为空字符串
#     try:
#         pattern = r'#([^#]+)#'
#         text = re.sub(pattern, '', text)
#     except Exception as e:
#         text = ""
#
#     return text

def get_weibo_list(v_keyword, v_max_page, v_weibo_file, v_weibo_json, mysql_weibo_author_id_list):
	"""
	Crawl Weibo search results for a keyword and append them to a CSV file.

	:param v_keyword: search keyword
	:param v_max_page: crawl pages 0..v_max_page of the search results
	:param v_weibo_file: path of the CSV file rows are appended to
	:param v_weibo_json: unused here; kept for interface compatibility
	:param mysql_weibo_author_id_list: unused here; kept for interface compatibility
	:return: None
	"""
	global a  # module-level counter of consecutive empty pages
	for page in range(0, v_max_page + 1):

		print('===开始爬取第{}页微博==='.format(page))
		# request address
		url = 'https://m.weibo.cn/api/container/getIndex'
		# request parameters
		params = {
			"containerid": "100103type=61&q={}".format(v_keyword),
			"page_type": "searchall",
			"page": page
		}

		# 10 consecutive empty pages means the keyword is exhausted: stop.
		if a == 10:
			a = 0
			break

		try:
			# Send the request; a timeout prevents a stalled connection from
			# hanging the crawl forever (previously there was none).
			r = requests.get(url, headers=headers, params=params, timeout=10)
			r.raise_for_status()  # raise for HTTP error status codes
			print(r.status_code)

			# Parse the JSON payload.
			# NOTE(review): 'test_data' is unusual — the public m.weibo.cn API
			# normally nests the payload under 'data'; confirm with a live response.
			data = r.json().get("test_data", {})
			cards = data.get("cards", [])

			if not cards:
				print('未获取到微博内容，进入下一轮循环')
				a += 1
				continue

		except requests.exceptions.RequestException as e:
			print('请求异常:', e)
			# BUGFIX: skip this page on a failed request. Previously execution
			# fell through to the parsing code below with `cards` unbound on
			# the first page (NameError) or stale from the previous page
			# (silently duplicating rows).
			continue

		# Post texts ('..' is jsonpath recursive descent). jsonpath returns
		# False — not an empty list — when nothing matched, so guard first.
		text_list = jsonpath(cards, '$..mblog.text')
		if not text_list:
			continue
		# Strip HTML tags from each post via regex.
		dr = re.compile(r'<[^>]+>', re.S)
		text2_list = [dr.sub('', text) for text in text_list]

		# Post creation times, converted to the standard format.
		time_list = jsonpath(cards, '$..mblog.created_at') or []
		time_list = [trans_time(v_str=i) for i in time_list]
		# Author screen names.
		author_list = jsonpath(cards, '$..mblog.user.screen_name')
		# Author ids.
		author_id_list = jsonpath(cards, '$..mblog.user.id')

		# Post ids.
		id_list = jsonpath(cards, '$..mblog.id') or []
		# For posts flagged as long, fetch and substitute the full text.
		isLongText_list = jsonpath(cards, '$..mblog.isLongText') or []
		for idx, is_long in enumerate(isLongText_list):
			if is_long == True:
				text2_list[idx] = getLongText(v_id=id_list[idx])

		# Detail-page link per post.
		detail = 'https://m.weibo.cn/detail/'
		Weblink_list = [detail + weibo_id for weibo_id in id_list]
		# Keyword column: one entry per row.
		v_keyword_list = [v_keyword] * len(id_list)

		# Collect the column lists into a DataFrame.
		df = pd.DataFrame(
			{
				'搜索关键字': v_keyword_list,
				'页码': [page] * len(id_list),
				'微博id': id_list,
				'微博作者': author_list,
				'发布时间': time_list,
				'微博内容': text2_list,
				'网页链接': Weblink_list,
				'作者id': author_id_list
			}
		)

		# Write the CSV header only when the file is first created.
		if os.path.exists(v_weibo_file):
			header = None
		else:
			header = ['搜索关键字', '页码', '微博id', '微博作者', '发布时间', '微博内容', '网页链接', '作者id']  # csv header
		# Append to the csv file.
		df.to_csv(v_weibo_file, mode='a+', index=False, header=header, encoding='utf_8_sig')
		print('csv保存成功:{}'.format(v_weibo_file))



def one_search(search_keyword, jsontxt, csvpath, max_search_page):
	"""Crawl one keyword into the CSV, then de-duplicate the file in place.

	:param search_keyword: keyword to search for
	:param jsontxt: path of the json bookkeeping file (passed through)
	:param csvpath: path of the CSV file to append to and clean
	:param max_search_page: number of result pages to crawl
	:return: None
	"""
	# Crawl the keyword; rows are appended to csvpath.
	weibo_author_id_list = []
	get_weibo_list(v_keyword=search_keyword, v_max_page=max_search_page, v_weibo_file=csvpath, v_weibo_json=jsontxt, mysql_weibo_author_id_list=weibo_author_id_list)
	# Data cleaning: drop duplicate rows by post id, keeping the first.
	# (The previous `pdpath = "" + csvpath + ""` was a pointless no-op
	# concatenation — the path is used directly.)
	df = pd.read_csv(csvpath)
	df.drop_duplicates(subset=['微博id'], inplace=True, keep='first')
	# Write the cleaned data back to the same file.
	df.to_csv(csvpath, index=False, encoding='utf_8_sig')
	print('数据清洗完成')



def spider():
	"""Read search keywords from keywords_list.txt (next to this script)
	and crawl each keyword's results into 1.csv in the same directory."""
	base_dir = os.path.split(os.path.realpath(__file__))[0]
	jsontxt = base_dir + os.sep + 'weibo_allsearch_npages.json'
	csvpath = base_dir + os.sep + '1.csv'
	keywords_list_path = base_dir + os.sep + 'keywords_list.txt'

	all_search_keyword = []
	# utf-8-sig tolerates a BOM written by Windows editors; text mode replaces
	# the previous per-line binary decode.
	with open(keywords_list_path, encoding='utf-8-sig') as f:
		for line in f.read().splitlines():
			keyword = line.strip()
			if keyword:  # skip blank lines so we never search an empty keyword
				all_search_keyword.append(keyword)

	max_search_page = 2000  # page budget; only needs to cover a full day's results
	for keyword in all_search_keyword:
		one_search(keyword, jsontxt, csvpath, max_search_page)



if __name__ == '__main__':
	# `a` is the module-level counter of consecutive empty pages that
	# get_weibo_list() reads and writes via `global a`. A `global` statement
	# at module scope is a no-op, so a plain assignment suffices here.
	a = 0
	spider()
