# 导入所需的库
import json
import requests
import time


# 定义爬取微博用户信息的函数
# Fetch a Weibo page/API endpoint as text
def scrape_weibo(url: str, timeout: float = 10.0) -> str:
    """Fetch ``url`` from weibo.com and return the response body as text.

    Sends a browser-like User-Agent/Referer plus a session cookie so the
    request is accepted by Weibo's anti-bot checks, then sleeps 3 seconds
    to rate-limit successive calls.

    Args:
        url: Full URL to fetch (e.g. a weibo.com ajax endpoint).
        timeout: Seconds to wait for the server before giving up.
            Without a timeout, ``requests.get`` can block forever.

    Returns:
        The response body decoded as text.

    Raises:
        requests.HTTPError: If the server answers with a 4xx/5xx status,
            so callers don't try to parse an error page as JSON.
        requests.Timeout: If the server does not respond within ``timeout``.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36",
        "Referer": "https://weibo.com"
    }
    # NOTE(review): hard-coded session cookie — expires eventually and leaks
    # credentials if this file is shared; consider loading from config/env.
    cookies = {
        "cookie": '''SINAGLOBAL=5008962874775.458.1711216467482; SCF=ArxCl9AdJxViGtueBxd7G1fkZLihJDrpPycaQUpwskcJxP19P_sigmw6XTk5E-FQMCfgekSWblcBJEo15_87fW8.; SUB=_2A25KQ10nDeRhGeFL6VUY-S3FzTqIHXVpIdDvrDV8PUNbmtANLUemkW9NQirWiJhdDjSH9nZXNwA5OCxDi8s4rIMD; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWUJleMpEVAYavg0ZA1Hkvo5NHD95QNSKzN1K.01KqcWs4Dqcjci--Xi-zRiKLhi--4iK.Ri-z0i--ci-82i-20i--fiKLhi-2Ri--fiKyWi-8Wi--fiKyWiKLF; ALF=02_1735309943; _s_tentry=passport.weibo.com; Apache=6354700235235.397.1732717945261; ULV=1732717945387:3:1:1:6354700235235.397.1732717945261:1724057701799; PC_TOKEN=17247ecc48'''
    }
    response = requests.get(url, headers=headers, cookies=cookies, timeout=timeout)
    response.raise_for_status()  # fail fast on HTTP errors instead of returning an error page
    time.sleep(3)  # 3s delay between requests to avoid triggering anti-scraping
    return response.text


# 根据UID构建URL爬取信息
# Fetch a user's profile detail by UID
def get_data(id):
    """Return the parsed profile-detail JSON for the Weibo user ``id``.

    Builds the ajax profile-detail URL from the UID, downloads it via
    ``scrape_weibo``, and decodes the JSON body into a Python object.
    """
    detail_url = f"https://weibo.com/ajax/profile/detail?uid={id}"
    raw_body = scrape_weibo(detail_url)
    return json.loads(raw_body)
