# coding=utf-8
 
import json
from urllib import parse

import requests
from lxml import etree


# Use a Session so cookies are stored and re-sent automatically.
# requests.Session() is the documented class; requests.session() is a
# legacy lowercase alias for it.
session = requests.Session()

# Search keyword (may contain non-ASCII characters).
keyword = '湖州'

# Target search URL. Percent-encode the keyword with parse.quote so the
# URL is valid even for non-ASCII text instead of relying on the HTTP
# client to encode it implicitly.
weibo_url = f"http://s.weibo.com/weibo?q={parse.quote(keyword)}"

# Request headers: mimic a desktop Edge/Chromium browser so the search
# page is served normally instead of a bot-detection response.
headers = {
    'Host': 's.weibo.com',
    'Connection': 'keep-alive',
    'Cache-Control': 'max-age=0',
    'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="90", "Microsoft Edge";v="90"',
    'sec-ch-ua-mobile': '?0',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36 Edg/90.0.818.62',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Sec-Fetch-Site': 'same-site',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-User': '?1',
    'Sec-Fetch-Dest': 'document',
    'Referer': 'https://weibo.com/',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6'
}

# GET the search page. A timeout keeps the script from hanging forever
# if the server stops responding (requests has no default timeout).
response_body = session.get(weibo_url, headers=headers, timeout=10)
# Fail fast on HTTP 4xx/5xx instead of silently parsing an error page.
response_body.raise_for_status()

# Decode the raw body as UTF-8 (the page is served as UTF-8 HTML).
response_content = response_body.content.decode("utf-8")

# Load the HTML into lxml's parser.
selector = etree.HTML(response_content)

# Select the content node of every search-result card.
div_content_list = selector.xpath('//div[@class="card-wrap"]/div[@class="card"]/div[@class="card-feed"]/div[@class="content"]')

print(len(div_content_list))

# Walk each result card and extract user name, text, video and images.
for div_content in div_content_list:
    # User name: first text node under the info div's link.
    user_name = div_content.xpath('div[@class="info"]/div[2]/a//text()')
    user_name = user_name[0] if user_name else ''
    print(user_name)

    # Post text. NOTE: for truncated posts the full text sits under a node
    # with node-type="feed_list_content_full"; this joins whatever text is
    # present (truncated and expanded variants together).
    # ''.join on an empty result list already yields '', so no guard needed.
    txt_content = ''.join(div_content.xpath('p[@class="txt"]//text()'))
    # print(txt_content)

    # Video URL is buried in the action-data attribute of the player link.
    video_content = div_content.xpath('div[@node-type="feed_list_media_prev"]/div[@class="thumbnail"]/a[@node-type="fl_h5_video"]/@action-data')
    # Weibo URL-encodes this payload twice, hence the double unquote.
    video_content = parse.unquote(parse.unquote(video_content[0])) if video_content else ''
    # Not every post has a video, and a payload without the video_src field
    # would make split(...)[1] raise IndexError — guard both cases.
    if video_content and '&video_src=' in video_content:
        # The resulting URL is directly playable; to save the video, issue
        # a plain GET request and write the response to a local file.
        video_content = 'http:' + video_content.split('&video_src=')[1]
    # print(video_content)

    # Image thumbnails: prepend the scheme to make each URL absolute.
    img_content_list = div_content.xpath('div[@node-type="feed_list_media_prev"]/div[@node-type="fl_pic_list"]/ul/li/img/@src')
    new_img_content = ['http:' + img for img in img_content_list]
    # print(new_img_content)

    # Without a logged-in account only the first result page is visible,
    # so pagination is not handled here.





















