# -*- coding: utf-8 -*-
"""
@Time : 2023/8/27 21:19
@Author : ChenXiaoliang
@Email : middlegod@sina.com
@File : weibo_img_downloader.py
"""

import requests
import time
import urllib.parse
from docx import Document
from docx.shared import Inches
import os
import sys

# Ask the user for a search keyword; a blank or whitespace-only entry aborts.
keyword = input("请输入查询关键词： ")
if keyword.strip() == "":
    sys.exit("程序退出，必须输入关键词")
# Percent-encode the keyword for the query string
# (same effect as encodeURIComponent() in a browser console).
encode_keyword = urllib.parse.quote(keyword)

base_url = "https://s.weibo.com/ajax_pic/list"

# Accumulates one dict per scraped picture across all pages.
result_set = []

# BUG FIX: the Cookie value was previously written with a backslash line
# continuation *inside* the string literal, which embedded the next line's
# leading indentation (a run of spaces) into the middle of the cookie value,
# corrupting it. Adjacent string literals join the halves cleanly instead.
headers = {
    "X-Requested-With": "XMLHttpRequest",
    "Cookie": ("SUB=_2AkMTtiHDf8NxqwJR"
               "mP4RzG3iaIxxyQnEieKl6tAYJRMxHRl-yT9vqk0JtRB6ODYPLQ7YC__A_SRCtgAAF-Sjjg6we04e;"),
    "Referer": f"https://s.weibo.com/pic?q={encode_keyword}&Refer=weibo_pic",
}

# 不要太大，因为有图片，所以会生成比较大size的文件，容易卡
# Keep the page count small: each page adds images, so the generated
# .docx grows quickly and becomes sluggish.
for i in range(1, 6):
    # Cache-busting millisecond timestamp, mimicking the site's own JS.
    now = round(time.time() * 1000)
    target_url = f"{base_url}?q={encode_keyword}&page={i}&_t=0&_rnd={now}"
    print(f"请求的地址是 {target_url}")
    # timeout= keeps the script from hanging forever on a stalled request.
    resp = requests.get(url=target_url, headers=headers, timeout=15).json()
    # Guard against an unexpected payload shape (e.g. an expired cookie
    # yields a different JSON body) instead of crashing with KeyError.
    pic_list = (resp.get('data') or {}).get('pic_list')
    if not pic_list:
        break  # no more results (or unexpected payload) — stop paging
    for pic in pic_list:
        result_set.append({
            'title': pic['text'],
            'url': pic['original_pic'],
            'created_at': pic['created_at'],
            'user_name': pic['user']['name'],
            'user_url': pic['user']['profile_url'],
        })

print(f"爬取的数据条数{len(result_set)}~")


# with open('结果集.txt', 'w', encoding='utf-8') as f:
#     f.write(str(result_set))


def download_img(url, img_name, timeout=30):
    """Download *url* and save it as ``<img_name>.jpg`` in the working dir.

    Parameters
    ----------
    url : str
        Absolute image URL, scheme included.
    img_name : str | int
        Basename for the output file; ``.jpg`` is appended.
    timeout : float, optional
        Seconds to wait for the HTTP request (default 30) so a stalled
        download cannot hang the script.

    Raises
    ------
    requests.HTTPError
        If the server answers with an error status. Previously an error
        page's bytes were silently written to disk as a ``.jpg``.
    """
    resp = requests.get(url, timeout=timeout)
    resp.raise_for_status()
    with open(f"{img_name}.jpg", 'wb') as f:
        f.write(resp.content)


# with open('结果集.docx', 'w', encoding='utf-8') as f:
#     f.write(f"数据量：{len(result_set)}条~\n\n\n")
#     counter = 0
#     for pic in result_set:
#         counter += 1
#         f.write(f"{f'分界线{counter}'.center(50, '=')}\n")
#         f.write(f"{pic['title']}\n")
#         f.write(f"{pic['url']}\n\n")
#         # download_img(f"https:{pic['url']}", counter)
#     print("Success!!!")

# Build the Word document: a header line plus, per picture, a divider,
# the embedded image, and a caption with the source metadata.
doc = Document()
doc.add_paragraph(f"一共有数据{len(result_set)}条~")
counter = 0
for pic in result_set:
    if not pic['url']:
        continue  # entry has no downloadable image — skip it
    counter += 1
    tmp_name = f"{counter}.jpg"
    download_img(f"https:{pic['url']}", counter)
    try:
        divider = f"\n{f' 分界线{counter} '.center(60, '=')}"
        text = f"{pic['title']} —— Created at {pic['created_at']} By {pic['user_name']}({pic['user_url']})\n\n"
        doc.add_paragraph(f"{divider}")
        doc.add_picture(tmp_name, width=Inches(5.5))
        url_backup = f"{pic['url']}\n可以用apifox请求访问，浏览器直接访问会报403"
        doc.add_paragraph(f"{text}{url_backup}{divider}")
    finally:
        # Always delete the temporary image, even when add_picture raises
        # (previously a failure left stray <n>.jpg files behind).
        if os.path.exists(tmp_name):
            os.remove(tmp_name)


# Persist the document; the filename embeds the keyword and a timestamp
# so repeated runs never overwrite each other.
out_name = f"{keyword}结果集{time.strftime('%Y%m%d%H%M%S')}.docx"
doc.save(out_name)
print("Success!!!".center(50, '√'))
