import os
from datetime import datetime
from tqdm import tqdm
import requests
import pandas as pd

# Search keywords, loaded from keywords.txt by read_keywords().
keywords = []
# Login cookie, loaded from cookie.txt by read_cookie(); sent in request headers.
cookie = ''
# NOTE(review): res_set is never used in this file — looks like leftover state; confirm before removing.
res_set = set()


def timestamp_to_custom_format(timestamp_str, fmt="%Y-%m-%d"):
    """Convert a millisecond epoch timestamp to a formatted date string.

    Args:
        timestamp_str: Epoch timestamp in milliseconds, as a string or int
            (the comment API returns `create_time` in milliseconds).
        fmt: strftime format for the result. Defaults to "%Y-%m-%d",
            preserving the original behavior; now parameterized so other
            callers can request a different format.

    Returns:
        The formatted date/time string.

    Note:
        Uses datetime.fromtimestamp(), i.e. the *local* timezone of the
        machine running the script — same as the original implementation.
    """
    # The API gives milliseconds; fromtimestamp() expects seconds.
    seconds = int(timestamp_str) / 1000
    return datetime.fromtimestamp(seconds).strftime(fmt)


def read_cookie():
    """Load the login cookie from cookie.txt into the module-level `cookie`.

    Returns:
        The cookie string (also stored in the global `cookie`). The original
        returned None; returning the value is backward compatible and makes
        the function testable.

    Bug fix: readline() keeps the trailing newline, so the original produced
    a `Cookie` header value containing "\\n" — an invalid HTTP header value
    that requests rejects. Strip surrounding whitespace before storing.
    """
    global cookie
    with open("cookie.txt", "r", encoding="utf-8") as file:
        cookie = file.readline().strip()
    return cookie


def read_keywords():
    """Populate the module-level `keywords` list from keywords.txt.

    Each line of the file is one keyword; surrounding whitespace (including
    the trailing newline) is stripped from every entry.
    """
    global keywords
    with open("keywords.txt", "r", encoding="utf-8") as file:
        # Iterate the file object directly; readlines() is unnecessary.
        keywords = [entry.strip() for entry in file]


def read_links(keyword):
    """Return the list of note links saved for *keyword*.

    Reads links_data/<keyword>.txt (one link per line) and strips
    surrounding whitespace from every line.
    """
    path = os.path.join("links_data", keyword + ".txt")
    links = []
    with open(path, "r", encoding="utf-8") as handle:
        for raw_line in handle:
            links.append(raw_line.strip())
    return links


if __name__ == '__main__':
    # Load the saved login cookie before building the request headers.
    read_cookie()
    headers = {
        # Pretend to be a regular desktop browser.
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/73.0.3683.75 Chrome/73.0.3683.75 Safari/537.36',
        # Attach the cookie captured from a logged-in session.
        'Cookie': cookie,
    }
    session = requests.Session()

    # Load the search keywords to process.
    read_keywords()

    # Fetch comment data for every saved link of every keyword.
    for keyword in keywords:
        links = read_links(keyword)
        userNames = []
        commentDetails = []
        commentTimes = []
        ipZones = []
        for link in tqdm(links, desc=keyword + ' - 评论数据 - 爬取进度'):
            # The note id is the last path segment; drop any query string
            # (e.g. "...?xsec_token=...") that would otherwise end up in the id.
            note_id = link.split('/')[-1].split('?')[0]
            response = session.get(
                f'https://edith.xiaohongshu.com/api/sns/web/v2/comment/page?note_id={note_id}&cursor=&top_comment_id=&image_formats=jpg,webp,avif',
                headers=headers)
            # Guard clause: skip this link on a failed request.
            if response.status_code != 200:
                print(f"Failed to fetch data. Status code: {response.status_code}")
                continue
            data = response.json()
            # code == -100 or success == False signals an expired session.
            # .get() avoids a KeyError when the payload lacks these fields
            # (the original indexed them directly and could crash instead).
            if data.get('code') == -100 or not data.get('success', True):
                print('登录已过期，请重新设置cookie！')
                raise SystemExit()  # same effect as the original exit(): terminate with status 0
            comments_array = data['data']['comments']
            for comment in comments_array:
                try:
                    userName = comment['user_info']['nickname']
                    commentDetail = comment['content']
                    commentTime = timestamp_to_custom_format(comment['create_time'])
                    # Some comments lack an IP location; default matches the original fallback.
                    ipZone = comment.get('ip_location', '未知')

                    userNames.append(userName)
                    commentDetails.append(commentDetail)
                    commentTimes.append(commentTime)
                    ipZones.append(ipZone)
                except (KeyError, TypeError, ValueError):
                    # Best-effort, as before: skip malformed comment entries
                    # (narrowed from the original bare `except Exception`).
                    pass

        # Assemble the per-keyword result table.
        df = pd.DataFrame({
            '用户评论内容': commentDetails,
            '用户名': userNames,
            '用户评论时间': commentTimes,
            '用户属地': ipZones
        })

        # Ensure the output directory exists, then save to Excel
        # (the original crashed if res_data/ was missing).
        os.makedirs('res_data', exist_ok=True)
        df.to_excel(os.path.join('res_data', keyword + '.xlsx'), index=False)
