# -*- coding: utf-8 -*-
import random
import re
import time
from collections import OrderedDict
from datetime import datetime
import pandas as pd
from lxml import etree
from selenium import webdriver
import json
from pymongo import MongoClient

# --- Selenium session setup: open the Weibo login page and restore cookies ---
url = 'https://weibo.com/login.php'
browser = webdriver.Chrome()
browser.get(url)

# Implicit wait: poll up to 10 s for elements before Selenium raises.
browser.implicitly_wait(10)

# Load previously exported cookies (cookie.json holds a JSON list of
# cookie dicts) and attach them to the current session.
with open('cookie.json', 'r', encoding='utf-8') as f:
    cookies = json.load(f)
for ck in cookies:
    browser.add_cookie(ck)

# Refresh so the restored cookies take effect (logged-in session).
browser.refresh()
time.sleep(3)

# --- Search parameters ---
# NOTE(review): the original declared `global page` / `global p` at module
# level, which is a no-op — plain assignments suffice here.
page = 1   # overwritten below; kept for compatibility
p = 1      # scratch variable, unused afterwards; kept for compatibility
key_word = '新冠疫情'        # search keyword
start_time = '2021-6-7-8'    # search window start, format Y-m-d-H
end_time = '2021-6-8-8'      # search window end, format Y-m-d-H ('' -> now)
sleep_time = 10
page = 10                    # number of result pages to fetch

# Round-trip through strptime/strftime to validate and zero-pad the
# timestamps into the exact form the search URL expects (e.g. 2021-06-07-08).
start_time = datetime.strptime(start_time, "%Y-%m-%d-%H").strftime("%Y-%m-%d-%H")
if end_time:
    end_time = datetime.strptime(end_time, "%Y-%m-%d-%H").strftime("%Y-%m-%d-%H")
else:
    end_time = datetime.now().strftime('%Y-%m-%d-%H')

# Search-result URL template.
# NOTE(review): Weibo search is normally 'https://s.weibo.com/weibo?q={kw}';
# this template appends '&timescope' directly after the keyword path segment —
# confirm the resulting URL actually returns results.
search_url = 'https://s.weibo.com/weibo/{keyword}' \
         '&timescope=custom:{start_time}:{end_time}&refer=g&page={page_num}'

#时间处理
def get_date(str_time):
    """Normalize a Weibo timestamp string to a ``datetime.date``.

    Relative forms ("今天", "N分钟前", "N秒前") resolve to today's date.
    Absolute forms such as "06月07日 12:30" or "2021年06月07日" are parsed;
    when no year is present the current year is assumed.

    Returns today's date as a fail-soft fallback when the string does not
    match any known format (the original raised IndexError here).
    """
    str_time = str_time.strip().replace(' ', '')

    # Relative timestamps all mean "sometime today".
    if '今天' in str_time or '分钟前' in str_time or '秒前' in str_time:
        return datetime.now().date()

    # Extract the "[YYYY年]MM月DD日" prefix; month/day are two digits on Weibo.
    found = re.findall(r"(.*\d\d月\d\d日).*", str_time)
    if not found:
        # Unexpected format — degrade gracefully instead of crashing the scrape.
        return datetime.now().date()
    str_time = found[0]

    # No explicit year -> assume the current year.
    if "年" not in str_time:
        str_time = str(datetime.now().year) + "年" + str_time

    return datetime.strptime(str_time, '%Y年%m月%d日').date()
   
#评论数处理
def get_num(string):
    """Pull the first run of digits (a comment/like/repost count) out of
    *string*, returned as a string; '0' when no digits are present."""
    found = re.search(r"\d+", string)
    return found.group(0) if found else '0'
# Column accumulators for the scraped records: each key is a DataFrame
# column name, each value the list of cell values collected so far.
save_data = OrderedDict()
for _column in ('昵称', '用户头像', '微博正文', '微博链接', '时间',
                '转发数', '评论数', '点赞数', '设备', '图片', '视频链接'):
    save_data[_column] = []

# 解析数据用的 xpath 语法
xpath_dict = {  
    '昵称': '//div[@class="info"]/div[2]/a/@nick-name',
	'用户头像':'//div[@class="avator"]/a/img/@src',
    '微博正文': '//div[@class="content"]/p[@node-type="feed_list_content"]',
    '微博链接': '//div[@class="content"]/p[@class="from"]/a[1]/@href',
    '时间': '//div[@class="content"]/p[@class="from"]',
    '转发数': '//div[@class="card-act"]/ul/li[1]/a/text()',
    '评论数': '//div[@class="card-act"]/ul/li[2]/a/text()',
    '点赞数': '//div[@class="card-act"]/ul/li[3]/a/button/span[2]/text()',
    '设备': '//div[@class="content"]/p[@class="from"]',
	'图片':'//div[@class="content"]',
	'视频链接':'//div[@class="content"]'
    }


base_sava_path = ' '  # output directory for the saved file
save_file_name = key_word + start_time + "~" + end_time  # output file name

# Video URLs live in a src:'...' attribute inside each result card's raw
# HTML, so they are extracted with a regex on the page source, not XPath.
# Hoisted out of the loop; the original re-compiled it per card AND
# clobbered the module-level variable `p` in doing so.
video_re = re.compile(r"src:'(.*?)unistore,video", re.S)

# Keys whose matches are plain counts and go through get_num().
# BUGFIX: the original list contained '转发' instead of '转发数', so repost
# counts never hit this branch and were stored as raw, whitespace-laden text.
number = ['点赞数', '收藏数', '评论数', '转发数']

for page_idx in range(10):
    # Build the search URL for this page and load it in the browser.
    s_url = search_url.format(keyword=key_word, start_time=start_time,
                              end_time=end_time, page_num=page_idx + 1)
    browser.get(s_url)
    time.sleep(5)  # crude wait for the page to finish rendering

    source = browser.page_source
    etree_html = etree.HTML(source)
    # One chunk per result card, for the regex-based video extraction below.
    cards = source.split('class="card-wrap')

    for key in xpath_dict.keys():
        data = etree_html.xpath(xpath_dict.get(key))
        if not data:
            # Nothing matched on this page: append one placeholder so the
            # columns keep the same length.
            save_data[key].append('0' if key in number else '')
            continue

        if key in number:
            for item in data:
                save_data[key].append(get_num(item))
        elif key == '时间':
            # The p.from text is "<date> <time> ..."; the first two
            # whitespace-separated tokens form the date portion.
            for tokens in (item.xpath('string(.)').split() for item in data):
                save_data[key].append(
                    get_date(tokens[0] + '' + tokens[1]).strftime('%Y-%m-%d'))
        elif key == '微博正文':
            for item in data:
                save_data[key].append(item.xpath('string(.)').strip())
        elif key == '设备':
            # Tokens after date+time describe the posting device/source.
            for tokens in (item.xpath('string(.)').split() for item in data):
                save_data[key].append(''.join(tokens[2:]))
        elif key == '图片':
            # BUGFIX: this branch used to test for '图片链接', which is not a
            # key of xpath_dict — image URLs were never collected and raw lxml
            # Elements were appended by the fallback branch instead.
            for card_idx in range(len(data)):
                imgs = etree_html.xpath(
                    '//*[@id="pl_feedlist_index"]/div[1]/div[{}]/div/div[1]/div[2]/div[2]/div/ul/li/img/@src'.format(card_idx + 1))
                save_data[key].append('\n'.join(imgs) if imgs else 'None')
        elif key == '视频链接':
            # One entry per genuine feed card; 'None' when the card has no video.
            for card in cards:
                if "feed_list_item" not in card:
                    continue
                vh = video_re.findall(card)
                if vh:
                    save_data[key].append('https:' + vh[0] + 'unistore,video')
                else:
                    save_data[key].append('None')
        elif key == '微博链接':
            for item in data:
                save_data[key].append('https:' + item)
        else:
            for item in data:
                save_data[key].append(item)

data = pd.DataFrame(save_data)
	













