import re
import sys
import time
from collections import OrderedDict
from datetime import datetime

import pandas as pd
import requests
from lxml import etree
from pymongo import MongoClient


# --- Interactive configuration -------------------------------------------
# Commented-out defaults, kept for quick manual testing:
#key_word='新冠疫情'
#
#start_time='2021-6-7-8'
#end_time='2021-6-8-8'
#sleep_time=10
# Prompt for the search keyword; entering 'q' exits the program.
key_word = input('请输入搜索关键字（输入q退出）：')
if key_word == 'q':
    print("正在退出....")
    sys.exit(0)
# Start/end times are entered as "year-month-day-hour", e.g. "2020-6-1-8".
start_time = input('请输入开始时间（输入格式：2020-6-1-8 代表年-月-日-小时）：')
end_time = input('请输入结束时间---不输入默认为直至现在（输入格式：2020-6-2-8代表年-月-日-小时）：')
# Seconds to wait between result pages; blank means the default (10 s).
sleep_time = input('输入页面间的访问间隔时间(默认10秒)：')

# Round-trip through strptime/strftime to validate the input and zero-pad
# the fields (e.g. "2021-6-7-8" -> "2021-06-07-08"), as required by the
# search URL built below.
start_time = datetime.strptime(start_time, "%Y-%m-%d-%H").strftime("%Y-%m-%d-%H")
if end_time:
    end_time = datetime.strptime(end_time, "%Y-%m-%d-%H").strftime("%Y-%m-%d-%H")
else:
    # A blank end time means "up to now".
    end_time = datetime.now().strftime('%Y-%m-%d-%H')



# Reuse a single HTTP session so the login cookie below is sent with
# every request.
session = requests.session()
   
session.verify = False  # NOTE(review): disables TLS certificate checks — confirm this is intentional

# NOTE(review): the Cookie value is a hard-coded login credential; it will
# expire and should not be committed to source control.
session.headers={
		'Cookie':'SINAGLOBAL=5120103893645.061.1636429386558; wvr=6; UOR=,,www.baidu.com; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWSQQksOo8B5zqHq1ZKgviv5JpX5KMhUgL.FoMXe0-RSo50SKz2dJLoIf2LxKML1K.LB.-LxKBLB.2L12zLxK-L122LBK5LxK-L12qLB-qLxKBLBo.L1K5LxKqLBozLBK2LxKqL1-eL1h.LxK.LBonLBK2LxKMLB.qL1h-t; ULV=1636986458726:24:24:10:2957412011917.948.1636986458672:1636933280652; ALF=1668557810; SSOLoginState=1637021811; SCF=Ai55wZo9w8e_JgDVFOtdj7TrylhjonC_STLDDU7W02rkFY-a3ZhU1pRpVviCfHzV-e8jWsIdtNWblKvDaLoKOhc.; SUB=_2A25MlogjDeRhGeFK6FcZ9i7Pzj6IHXVv5f7rrDV8PUNbmtB-LRblkW9NQ3pQDE7OHR7B7WH0BukUI8irT_Hb8_XX; XSRF-TOKEN=3oe0gYyJbX-PX3RVwWR7LyWd; WBPSESS=Dt2hbAUaXfkVprjyrAZT_PlfJPuYSqOXNYT_5GxsSsgY5p94M_fhbTHZtVNog1A4XSYBlLhiMDns7JCbZfgn8cRShpUZOe280HZC9ucLr6M37hpYVOhrKhcQ1lbqGTlmPc3IUang3UaAIhhKrUTAKbmaihSXNgrHBTJpL89pYrYWMDxLz6kTkGJeh4dJs2NRUlCUqBnbQpQBZ4XwYc9Jgg==',
        'User-Agent':'Chrome/94.0.4606.81 Safari/537.36',
		'referer': 'https://passport.weibo.com/'
		}


# Timestamp normalization
def get_date(str_time):
    """Normalize a Weibo timestamp string into a ``datetime.date``.

    Relative forms ("今天", "N分钟前", "N秒前") map to today's date.
    Otherwise a "[YYYY年]MM月DD日" fragment is extracted from the string;
    when the year is omitted, the current year is assumed.
    Raises IndexError when no month/day fragment can be found.
    """
    cleaned = str_time.strip().replace(' ', '')

    # Relative timestamps all refer to the current day.
    for marker in ('今天', '分钟前', '秒前'):
        if marker in cleaned:
            return datetime.now().date()

    # Keep only the "...MM月DD日" portion of the string.
    cleaned = re.findall(r"(.*\d\d月\d\d日).*", cleaned)[0]

    # Weibo omits the year for dates within the current year.
    if "年" not in cleaned:
        cleaned = str(datetime.now().year) + "年" + cleaned

    return datetime.strptime(cleaned, '%Y年%m月%d日').date()
   
# Counter extraction
def get_num(string):
    """Return the first run of digits in *string* (as a string), or '0'.

    Used to pull comment/like/repost counts out of stat labels such as
    "评论 12"; a label with no digits (e.g. plain "评论") yields '0'.
    """
    match = re.search(r"\d+", string)
    return match.group(0) if match else '0'
		 
# Accumulator for scraped rows: one list per output column, keyed by the
# Chinese column label. The keys become DataFrame column names (runtime
# data), so they are left untranslated; OrderedDict fixes column order.
save_data = OrderedDict({  # columns of the final DataFrame
    "昵称": [],  # nickname
	"用户头像":[],  # avatar image URL
    "微博正文": [],  # post text
    "微博链接": [],  # post permalink
    "时间": [],  # post date (YYYY-MM-DD)
    "收藏数": [],  # favourite count
    "转发数": [],  # repost count
    "评论数": [],  # comment count
    "点赞数": [],  # like count
    "设备": [],  # posting device / client name
	"图片链接":[],  # image URLs, newline-joined ('None' if absent)
	"视频链接":[],  # video URL ('None' if absent)
	"微博正文地址":[],  # post detail-page URL ('None' if absent)
})

# XPath expression for each output column; evaluated against every
# search-result page in the crawl loop below.
xpath_dict = {  
    '昵称': '//div[@class="info"]/div[2]/a/@nick-name',
	'用户头像':'//div[@class="avator"]/a/img/@src',
    '微博正文': '//div[@class="content"]/p[@node-type="feed_list_content"]',
    '微博链接': '//div[@class="content"]/p[@class="from"]/a[1]/@href',
    # 时间 and 设备 deliberately share one node: the "from" line carries
    # both the timestamp and the client name, split apart later.
    '时间': '//div[@class="content"]/p[@class="from"]',
    '收藏数': '//a[@action-type="feed_list_favorite"]/text()',
    '转发数': '//div[@class="card-act"]/ul/li[1]/a',
    '评论数': '//a[@action-type="feed_list_comment"]/text()',
    '点赞数': '//div[@class="card-act"]/ul/li[3]/a/button/span[2]/text()',
    '设备': '//div[@class="content"]/p[@class="from"]',
	# For the last three keys the xpath mainly sizes the per-card loops;
	# the actual values are fetched with positional xpaths or a regex.
	'图片链接':'//div[@class="content"]',
	'视频链接':'//div[@class="card-wrap"]',
	'微博正文地址':'//div[@class="content"]'
    }

  # Output file save path and file name for the scraped data.
base_sava_path = ' '  # NOTE(review): likely a typo for "save"; unused in this chunk — confirm
save_file_name = key_word + start_time + "~" + end_time 
# Search URL template; {page_next} selects the result page (1-based).
# NOTE(review): query parameters are joined with '&' but no '?' appears
# in the template — verify this is the URL shape s.weibo.com expects.
search_url = 'https://s.weibo.com/weibo/{keyword}' \
         '&timescope=custom:{start_time}:{end_time}&refer=g&page={page_next}'

# --- Crawl up to 10 result pages, accumulating rows into save_data ------

# Normalize the page interval once: default to 10 seconds when the user
# pressed Enter, otherwise honour the entered value. (Previously this
# ran on every iteration and the value was never actually used.)
sleep_time = 10 if not sleep_time else int(sleep_time)

# Pre-compile the pattern used to dig video URLs out of the raw HTML;
# video sources are embedded in script text and not reachable via xpath.
video_pattern = re.compile(r"src:'(.*?)unistore,video", re.S)

for page in range(1, 11):
    s_url = search_url.format(keyword=key_word, start_time=start_time,
                              end_time=end_time, page_next=page)
    # BUG FIX: sleep_time was prompted for and parsed but never slept on.
    # Pause between pages so Weibo does not rate-limit the session.
    if page > 1:
        time.sleep(sleep_time)
    source = session.get(s_url)
    etree_html = etree.HTML(source.text)
    # One chunk per result card, so video links can be matched per card.
    card_chunks = source.text.split('class="card-wrap')

    # Columns whose xpath already yields a plain-text counter.
    count_keys = ['点赞数', '收藏数', '评论数']

    for key, xpath in xpath_dict.items():
        data = etree_html.xpath(xpath)
        if not data:
            # Keep every column growing even on an empty page.
            save_data[key].append('0' if key in count_keys else '')
            continue
        if key in count_keys:
            # Text nodes such as "评论 12": keep just the digits.
            for item in data:
                save_data[key].append(get_num(item))
        elif key == '转发数':
            # The repost counter sits in the element's full text.
            for item in data:
                save_data[key].append(get_num(item.xpath('string(.)').strip()))
        elif key == '时间':
            # The first two whitespace tokens form the date fragment.
            for item in data:
                tokens = item.xpath('string(.)').split()
                save_data[key].append(
                    get_date(tokens[0] + tokens[1]).strftime('%Y-%m-%d'))
        elif key == '微博正文':
            for item in data:
                save_data[key].append(item.xpath('string(.)').strip())
        elif key == '设备':
            # Everything after the date/time tokens names the client app.
            for item in data:
                tokens = item.xpath('string(.)').split()
                save_data[key].append(''.join(tokens[2:]))
        elif key == '图片链接':
            # Images are addressed positionally, card by card.
            for idx in range(len(data)):
                imgs = etree_html.xpath(
                    '//*[@id="pl_feedlist_index"]/div[1]/div[{}]/div/div[1]/div[2]/div[2]/div/ul/li/img/@src'.format(idx + 1))
                save_data[key].append('\n'.join(imgs) if imgs else 'None')
        elif key == '微博正文地址':
            # Detail-page link of the "expand" anchor, per card.
            for idx in range(len(data)):
                links = etree_html.xpath(
                    '//*[@id="pl_feedlist_index"]/div[1]/div[{}]/div/div[1]/div[2]/p[2]/a[@action-type="fl_unfold"]/@href'.format(idx + 1))
                if links:
                    save_data[key].append('https:' + '\n'.join(links))
                else:
                    save_data[key].append('None')
        elif key == '视频链接':
            # Scan each raw HTML card for an embedded video source.
            for chunk in card_chunks:
                if "feed_list_item" not in chunk:
                    continue
                found = video_pattern.findall(chunk)
                if found:
                    save_data[key].append('https:' + found[0] + 'unistore,video')
                else:
                    save_data[key].append('None')
        elif key == '微博链接':
            # Hrefs are protocol-relative; prepend the scheme.
            for item in data:
                save_data[key].append('https:' + item)
        else:
            # 昵称 / 用户头像: the xpath already yields the final string.
            for item in data:
                save_data[key].append(item)

# Assemble the collected columns into one DataFrame.
# NOTE(review): pandas raises if the per-key lists end up with unequal
# lengths, which can happen when cards are missing fields — confirm.
data = pd.DataFrame(save_data)
# --- Optional: persist the rows to MongoDB (disabled) -------------------
#Client=MongoClient()
#db=Client['wb']
#persons=db.comtent
#persons.insert(save_data)


# Fetches the comments of just one of the scraped posts.
# NOTE(review): the block below is dead code parked inside a string
# literal; it needs selenium plus a local cookie.json and never runs.
'''
#使用selenium对评论内容进行获取
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import json
import time

d=data['微博正文地址']
pl_save=[]
#for g in range(len(d)):
r=d[6]#获取目标网页的url
url = 'https://weibo.com/login.php'
chrome_options =Options()
chrome_options.add_argument('--headless')
browser = webdriver.Chrome(options=chrome_options)
time.sleep(3)
browser.get(url)

#隐式等待
browser.implicitly_wait(10)

#读取cookie
with open('cookie.json','rb') as f:
		 cookie=json.load(f)
#将cookie添加到浏览器对象中
for i in cookie:
	browser.add_cookie(i)
browser.get(r)
browser.refresh()
#滑到底部，加载完成
js = "var q=document.documentElement.scrollTop=10000"         
browser.execute_script(js)	
time.sleep(6)

#获取页面源代码
a=browser.page_source
browser.quit()
etree_html=etree.HTML(a)

content=[]
#获取评论内容
comment1=etree_html.xpath('//div[@class="vue-recycle-scroller__item-view"]')
for i in range(len(comment1)):
	b=etree_html.xpath('//*[@id="scroller"]/div[1]/div[{}]/div/div/div/div[1]/div[2]/div[1]/span/text()'.format(i+1))
	if len(b)==0:
		content.append('None')
	else:
		content.append(b[0])
		
##获取评论用户id
content_id=etree_html.xpath('//div[@class="text"]/a/text()')
##获取评论头像
content_img=etree_html.xpath('//div[@class="item1in woo-box-flex"]/div[1]/a/div/img/@src')
'''





