# -*- coding: utf-8 -*-
"""
@Time : 2024/10/17 19:17
@Author : ChenXiaoliang
@Email : middlegod@sina.com
@File : weibo_search.py
"""
import urllib.parse
import requests
import time
import logging
from bs4 import BeautifulSoup

from sys_func.read_config import load_yaml

# Log line layout: level, timestamp, message.
LOG_FORMAT = "%(levelname)s %(asctime)s - %(message)s"
# Append (filemode='a') to monitor.log; record INFO and above.
logging.basicConfig(filename='monitor.log', level=logging.INFO, filemode='a', format=LOG_FORMAT)
# Root logger shared by this module.
logger = logging.getLogger()


def get_weibo_search(keyword):
    """
    Fetch Weibo realtime search results for a keyword.

    :param keyword: search keyword (URL-encoded before the request)
    :return: list of dicts, one per result, with keys
             'title', 'created_at', 'user_name', 'user_url'
    """
    # URL-encode the keyword — equivalent to the browser console
    # function encodeURIComponent().
    encode_keyword = urllib.parse.quote(keyword)

    base_url = "https://s.weibo.com/realtime"

    result_set = []
    # Cookie and page count come from the monitoring config file.
    config_data = load_yaml('config_monitor.yaml')
    sub_info = config_data.get('weibo').get('cookie_sub')
    headers = {"X-Requested-With": "XMLHttpRequest",
               "Cookie": f"{sub_info}",
               "Referer": f"https://s.weibo.com/realtime?q={encode_keyword}&rd=realtime&tw=realtime&Refer=pic_realtime"}

    # Keep the page count small: result pages contain images, so large
    # counts produce big files and can stall downstream processing.
    pages = config_data.get('weibo').get("search").get("pages")
    for page in range(1, pages + 1):
        target_url = f"{base_url}?q={encode_keyword}&rd=realtime&tw=realtime&page={page}"
        logger.info("请求的地址是 %s", target_url)
        # timeout prevents the scraper from hanging forever on a stalled
        # connection (the original request had no timeout).
        r = requests.get(target_url, headers=headers, timeout=10)
        soup = BeautifulSoup(r.text, "html.parser")
        for result in soup.find_all('div', class_="content"):
            # Look up the user anchor once; reuse it for both name and URL.
            name_anchor = result.find('a', class_="name")
            if name_anchor is None:
                # Skip cards without a user link (e.g. ad/filler entries)
                # instead of raising AttributeError.
                continue
            user_name = name_anchor.get_text()
            user_url = name_anchor.get('href')
            # Timestamp text, with internal whitespace stripped out.
            created_at = result.find('div', class_='from').get_text().strip().replace(' ', '').replace('\n', '')
            # A post may carry one or two <p class="txt"> elements: when the
            # first is a truncated preview (contains "展开" = "expand"), the
            # second holds the full text ending with "收起" = "collapse".
            title_text_ls = result.find_all('p', class_="txt")
            if len(title_text_ls) == 1:
                title = title_text_ls[0].get_text().strip()
            elif "展开" in title_text_ls[0].get_text().strip():
                # Drop the trailing "收起" marker from the expanded text.
                title = title_text_ls[1].get_text()[:-2].strip('收起').strip()
            else:
                title = title_text_ls[0].get_text().strip()
            result_set.append({
                'title': title,
                'created_at': created_at,
                'user_name': user_name,
                'user_url': user_url
            })

    logger.info("爬取的数据条数%s~", len(result_set))
    return result_set


if __name__ == '__main__':
    # Ad-hoc manual run of the scraper with a sample keyword.
    sample_keyword = "python"
    get_weibo_search(sample_keyword)
