# -*- coding: utf-8 -*-
# @Time    : 2024/5/26 19:38
# @Author  : Wllen
# @Email   : wllen@wllen.com
# @File    : pa.py
# @Software: PyCharm

import json
import random
import requests
from bs4 import BeautifulSoup
from lxml import html

# Alias so callers can use ``etree`` directly; lxml exposes it via the html module.
etree = html.etree
from multiprocessing.pool import ThreadPool

title='获取了微博记录数：'  # label string ("number of Weibo records fetched:")
tgi = 0  # global counter of diary posts processed so far (shared across pool threads)
proxys = []  # proxy pool: "ip:port" strings scraped by get_proxy()
# NOTE(review): this is a str, yet getHtml's error path calls ``proxy.remove(...)``
# and ``len(proxy)`` — almost certainly ``proxys`` was meant; confirm and fix there.
proxy = ""  # 存储当前使用的代理IP -> intended to hold the proxy currently in use
header = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36 Edg/121.0.0.0'}


# Scrape free proxy IPs from kuaidaili into the global ``proxys`` pool.
def get_proxy(limit=None):
    """Fetch the first kuaidaili proxy-list page and append ``"ip:port"``
    strings to the global ``proxys`` list.

    Args:
        limit: optional cap on how many proxies to append on this call;
            ``None`` means take everything on the page.  The parameter
            exists because ``getHtml`` invokes ``get_proxy(10)`` — the
            original zero-argument signature raised ``TypeError`` there.

    Side effects: mutates the module-level ``proxys`` list; performs one
    HTTP GET (best-effort: a non-200 response is silently ignored).
    """
    # timeout added so a dead proxy site cannot hang the whole scraper
    r = requests.get("https://www.kuaidaili.com/proxylist/1",
                     headers=header, timeout=10)
    if r.status_code == 200:
        soup = BeautifulSoup(r.text, 'html.parser')
        # The page lists proxies in a bordered table: IP | port | ...
        table = soup.find('table', class_='table table-b table-bordered table-striped')
        if table:
            added = 0
            for row in table.find_all('tr')[1:]:  # [1:] skips the header row
                cells = row.find_all('td')
                if len(cells) >= 2:
                    ip = cells[0].text.strip()
                    port = cells[1].text.strip()
                    proxys.append(f"{ip}:{port}")
                    added += 1
                    if limit is not None and added >= limit:
                        break


# Fetch the body of *url*, optionally through a random proxy from the pool.
def getHtml(url, p):
    """Return the response body of *url* as text.

    Args:
        url: address to fetch.
        p:   truthy -> route through a random proxy from the global
             ``proxys`` pool, dropping dead proxies and refilling the
             pool as needed; falsy -> fetch directly.

    Returns:
        The response text, or ``None`` if every proxy attempt failed.

    Bug fixes vs. the original:
      * ``proxy.remove(...)`` / ``len(proxy)`` operated on the wrong
        variable (``proxy`` is a str — ``.remove`` would raise
        AttributeError); the pool is ``proxys``.
      * ``get_proxy(10)`` raised TypeError (no-arg signature).
      * the proxy index could be unbound inside ``except`` if the
        exception fired before it was assigned.
      * unbounded recursion on persistent failure is now a bounded loop.
    """
    if not p:
        # Direct request, no proxy.
        return requests.get(url, headers=header, timeout=5).text

    # Bounded retry loop instead of unbounded recursion.
    for _ in range(10):
        if not proxys:
            get_proxy()  # refill an empty pool; random.choice on [] would raise
            if not proxys:
                break  # still nothing available — give up
        chosen = random.choice(proxys)  # bound before try so except can see it
        try:
            resp = requests.get(url, proxies={'http': chosen},
                                headers=header, timeout=5)
            return resp.text
        except Exception as e:
            print(e)
            # Drop the dead proxy and top the pool back up when it runs low.
            if chosen in proxys:
                proxys.remove(chosen)
            if len(proxys) < 5:
                get_proxy()
    return None


# Program driver: build the proxy pool, then scrape the Weibo search-API
# pages concurrently through a thread pool.
def main():
    """Populate the proxy pool, fan 19 page-scrape tasks out over a
    20-thread pool, and wait for them all to finish.

    Side effects: network I/O via ``get_proxy``/``run``; prints progress.
    """
    print("开始")
    get_proxy()  # populate the proxy pool before any worker needs it
    # Bug fix: report the pool that was just filled; ``proxy`` is always "".
    print(proxys)
    pool = ThreadPool(20)
    # NOTE(review): range(1, 20) covers pages 1..19, not 20 — confirm intent.
    for num in range(1, 20):
        url = ("https://m.weibo.cn/api/container/getIndex?containerid=100103type%3D61%26q%3D%E8%88%94%E7%8B%97%E6%97%A5%E8%AE%B0%26t%3D0&page_type=searchall&page="
               + str(num))
        # apply_async never raises for task errors (they surface in the
        # AsyncResult, which we discard); run() handles its own failures.
        pool.apply_async(run, args=(url,))
    pool.close()
    pool.join()
    print("结束")


# Worker: download one search-result page, extract each post's text and
# POST it to the local diary endpoint.
def run(url):
    """Process a single Weibo search-API result page.

    Args:
        url: the m.weibo.cn getIndex API URL for one result page.

    Side effects: POSTs each extracted post to ``https://127.0.0.1/diary``
    and increments the global counter ``tgi``.  Best-effort: any failure
    (bad JSON, missing keys, failed POST) is printed and the rest of the
    page is skipped.
    """
    global tgi
    body = getHtml(url, True)
    post_url = 'https://127.0.0.1/diary'  # don't shadow the *url* parameter
    try:
        # Moved inside the try: getHtml may return None or a non-JSON error
        # page, and json.loads would otherwise kill the worker silently.
        data2 = json.loads(body)
        # Iterate the cards directly instead of indexing with range(len(...)).
        for card in data2['data']['cards']:
            text = card['mblog']["text"]
            # Turn the page-relative Weibo links into absolute ones.
            diary = text.replace('/status/', 'https://m.weibo.cn/status/') \
                        .replace('/n/', 'https://m.weibo.cn/n/')
            requests.post(post_url, data={'time': '来自微博的日记', 'content': diary})
            # NOTE(review): ``tgi += 1`` from many pool threads is not
            # synchronized; the count may drift slightly under load.
            tgi += 1
            print(str(tgi) + "--" + text)
    except Exception as e:
        print(e)


# Script entry point: only run the scraper when executed directly.
if __name__ == "__main__":
    main()
