import requests,os,time,csv
from lxml import etree

'''
爬取后会在同级目录下创建一个 weibo.csv 文件进行存储；存储路径可在 func 函数内的 open() 调用处修改，如需存入数据库请自行连接。
程序优化潜力巨大，至少有 3 秒的优化空间。
'''

def down(url):
    """Fetch one s.weibo.com search-result page.

    Args:
        url: Full search URL (https://s.weibo.com/weibo?q=...&page=...).

    Returns:
        The requests.Response object for the GET request.

    Raises:
        requests.RequestException: on connection failure or timeout.

    NOTE(review): the cookie values below are a hard-coded login session and
    will expire (see ALF/SUB) — refresh them when requests start returning
    the login page instead of search results.
    """
    headers = {
        "authority": "s.weibo.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "cache-control": "max-age=0",
        "dnt": "1",
        "referer": "https://weibo.com/",
        "sec-ch-ua": "^\\^Chromium^^;v=^\\^104^^, ^\\^",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "^\\^Windows^^",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "same-site",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.81 Safari/537.36 Edg/104.0.1293.54"
    }
    cookies = {
        "SINAGLOBAL": "6440699255737.998.1655463896779",
        "PC_TOKEN": "6af3e72e20",
        "SUBP": "0033WrSXqPxfM725Ws9jqgMF55529P9D9Whgvbp6bSJVKl7kGo8W-GGN5JpX5KMhUgL.Fo27eh5RSoz4e0B2dJLoIpYLxK-L1KzL1K-LxK-L1KzL1K-LxK-L1KzL1K-71h27",
        "ALF": "1692338009",
        "SSOLoginState": "1660802011",
        "SCF": "AnrTJwesRhdJF34qWo3a9VwkevG7hk-SIkz2RFSJffmjmxFXH-XEBFeMiPHLk5kD6LuInxj5ax7MXNhilUNtRzI.",
        "SUB": "_2A25P-aOMDeRhGedO61IZ9izFyDiIHXVsjpJErDV8PUNbmtAKLUbtkW9NIGHqD2CNIgNiV4tkP0Yky1iNO4D_EuSv",
        "_s_tentry": "weibo.com",
        "Apache": "8453089793043.646.1660802067283",
        "ULV": "1660802067384:3:1:1:8453089793043.646.1660802067283:1655471279352"
    }
    # timeout keeps the scraper from hanging forever on a stalled connection
    res = requests.get(url, headers=headers, cookies=cookies, timeout=10)
    return res

# 文章路径 //*[@id="pl_feedlist_index"]/div[2]/div[2]/div[2]/div[1]/div[2]/p[2]

def func(res):
    """Parse one search-result HTML page and append rows to ./weibo.csv.

    Each kept post is written as a CSV row ``[account, content]`` where
    ``account`` is the xpath result list for the author name.

    Args:
        res: HTML text of an s.weibo.com search-result page.
    """
    tree = etree.HTML(res)
    div_list = tree.xpath('//*[@id="pl_feedlist_index"]/div[2]/div')
    # Open the output file once for the whole page instead of re-opening it
    # for every single row (the original opened/closed it per post).
    with open('./weibo.csv', mode='a+', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        n = 1  # counts only kept (non-ad) posts, matching the printed index
        for div in div_list:
            account = div.xpath('.//div[@class="info"]/div[2]//a/text()')
            if len(account) == 0:  # no author name means an ad card: skip it
                continue
            # Prefer the expanded ("full") text of long posts...
            content = ''.join(div.xpath('.//p[@node-type="feed_list_content_full"]//text()')).strip().replace('\n','')
            if len(content) == 0:
                # ...and fall back to the collapsed text for short posts,
                # stripping the layout padding Weibo embeds in it.
                content = ''.join(div.xpath('.//p[@node-type="feed_list_content"]//text()')).strip().replace('\n', '').replace('                                                    ','').replace(' ​','')
            print(account, content)
            print(n)
            n += 1
            writer.writerow([account, content])

def menu(kw):
    """Show the mode-selection menu and return the chosen option.

    Args:
        kw: The search keyword, echoed in the banner.

    Returns:
        The selected option as an int (1 = crawl X pages, 2 = crawl page X).

    Raises:
        ValueError: if the user types something that is not an integer.
    """
    os.system('cls')  # NOTE(review): 'cls' is Windows-only; use 'clear' on POSIX
    print('---------------------------')
    print(f'----您正在搜索的内容是{kw}----')
    print('---------------------------')
    print('###----1，一次性爬取X页----###')
    print('###----2，爬取第X页内容----###')
    print('###----请输入序号进行选择---###')
    # int() fixes the str-vs-int comparison the original worked around with
    # eval(); eval() on raw user input executes arbitrary code and is unsafe.
    opt = int(input('>>>'))
    return opt

if __name__ == '__main__':
    kw = input('请输入您要搜索的微博内容->>')
    opt = menu(kw)
    # int() instead of eval(): eval() on raw user input executes arbitrary code.
    page = int(input('请输入需要爬取的页数->>>'))
    if opt == 1:
        print('您选择了模式一')
        print('3秒后开始爬取')
        time.sleep(3)
        # Weibo search pages are 1-based: range(page) would request page 0
        # (a duplicate of page 1) and never fetch the last page.
        for i in range(1, page + 1):
            url = f'https://s.weibo.com/weibo?q={kw}&page={i}'
            res = down(url).text
            func(res)
    else:
        print('您选择了模式二')
        print('3秒后开始爬取')
        time.sleep(3)
        url = f'https://s.weibo.com/weibo?q={kw}&page={page}'
        res = down(url).text
        func(res)