# Standard library
import time
from datetime import datetime, timedelta

# Third-party
import pandas as pd
import requests
from bs4 import BeautifulSoup

# 初始化数据存储列表
dataa = []

# 定义主函数
def main_pull(keyword_with_titles, days):
    conuter = 0
    c_num = 0
    # 获取今天的日期
    today = datetime.today()

    # 计算指定天数前的日期
    days_ago = today - timedelta(days=(days-1))

    # 格式化日期为字符串
    k = days_ago.strftime('%Y-%m-%d')
    j = today.strftime('%Y-%m-%d')

    # 初始化日期列表
    dates = []
    dt = datetime.strptime(k, "%Y-%m-%d")

    # 循环生成日期列表
    while dt.strftime("%Y-%m-%d") <= j:
        dates.append(dt.strftime("%Y-%m-%d"))
        dt = dt + timedelta(1)

    # 初始化一个集合来跟踪已遇到的"动态文字"
    seen_texts = set()

    # 关键词列表
    for wor, title in keyword_with_titles:
        # 按日期循环
        for tii in dates:
            # 每天最多抓取25页的数据
            for i in range(1, 26):
                # 构建URL
                url = f'https://s.weibo.com/weibo?q={wor}&typeall=1&suball=1&timescope=custom:{tii}-0:{tii}-23&Refer=g&page={i}'

                # 设置请求头
                headers = {
                    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                    'accept-encoding': 'gzip, deflate, br, zstd',
                    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                    'cache-control': 'max-age=0',
                    'cookie': 'SUB=_2Akxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx; SUBP=0033WrSXqPxfxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx; _s_tentry=weibo.com; Apache=xxxxxxxxxxxxxxxxxxxxxxxx; SINAGLOBAL=xxxxxxxxxxxxxxxxxxxxxxxxx; ULV=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx; WBtopGlobal_register_version=xxxxxxxxxx',
                    'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Microsoft Edge";v="126"',
                    'sec-ch-ua-mobile': '?0',
                    'sec-ch-ua-platform': '"Windows"',
                    'sec-fetch-dest': 'document',
                    'sec-fetch-mode': 'navigate',
                    'sec-fetch-site': 'same-site',
                    'sec-fetch-user': '?1',
                    'upgrade-insecure-requests': '1',
                    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'
                }

                # 创建会话对象
                ss = requests.session()
                ss.keep_alive = False

                # 尝试请求页面，处理请求异常
                while True:
                    try:
                        html0 = ss.get(url=url, headers=headers, timeout=(3, 3)).text
                        break
                    except requests.exceptions.RequestException:
                        pass

                # 解析HTML内容
                soup = BeautifulSoup(html0, 'html.parser')
                ddd = soup.find_all('div', class_='card-wrap')

                # 遍历微博卡片
                for d in ddd:
                    conuter += 1
                    dic = {}

                    # 获取点赞数
                    try:
                        dz = d.find('button', class_='woo-like-main').get_text()
                        dic['like'] = int(dz)
                    except:
                        dic['like'] = 0

                    # 获取动态文字内容
                    p = d.find('p', class_='txt')
                    if p:
                        text = p.get_text(strip=True)
                        dic['text'] = text
                    else:
                        dic['text'] = ""

                    # 添加日期信息
                    dic['date'] = tii

                    # 添加对应的标题文本
                    dic['title'] = title

                    # 仅在字典不为空且"动态文字"字段不为空时，添加到数据列表中
                    if dic and dic['text'] and dic['text'] not in seen_texts:
                        dataa.append(dic)
                        seen_texts.add(dic['text'])

            c_num += 1
            print(f"已爬取关键词：{wor}，日期：{tii}, 进度：{c_num}/{len(keyword_with_titles) * (days)}")

    # 将数据列表转换为DataFrame
    df = pd.DataFrame(dataa)

    # 按照点赞量降序排序
    df = df.sort_values(by='like', ascending=False)

    # 添加自增的 "id" 列
    df['id'] = range(1, len(df) + 1)

    # 将DataFrame写入Excel文件
    writer = pd.ExcelWriter(f'../public/data/tables/data_orin.xlsx', engine='xlsxwriter')
    df.to_excel(writer, index=False)
    writer.close()
    print("去重后数据总量：", len(dataa))
    print("爬出的点赞量和动态文字经清洗后已存入表格：data_orin.xlsx")

# 主函数入口
if __name__ == '__main__':
    # 示例关键词和对应标题
    main_pull([('示例关键词', '示例标题')], 7)
