import json
import os
import time
from datetime import datetime, timedelta

import pandas as pd
import requests
from lxml import etree

from spider.scraper.utils import get_cookie


def get_date_list(today):
    """Return seven consecutive dates, newest first, as "%Y%m%d" strings.

    Args:
        today: The most recent date, formatted "%Y%m%d"; the result starts
            here and walks backwards one calendar day at a time.

    Returns:
        A list of 7 date strings in "%Y%m%d" format, descending order.
    """
    start = datetime.strptime(today, "%Y%m%d").date()
    return [(start - timedelta(days=offset)).strftime("%Y%m%d")
            for offset in range(7)]


def get_html(start_date="20240625", cookie_file="../config/cookie.txt",
             output_file="../data/data.csv"):
    """Scrape Pixiv's daily top-50 rankings for the 7 days ending at start_date.

    For each day, fetches the ranking page, visits every top-50 artwork page,
    extracts metadata from the JSON blob embedded in the last <meta content>
    attribute, and appends the rows to a CSV file. The CSV header is written
    only when the file does not exist yet.

    Args:
        start_date: Most recent ranking date, "%Y%m%d" format.
        cookie_file: Path to the file holding the Pixiv session cookie.
        output_file: CSV file that scraped rows are appended to.
    """
    columns = ["id", "date", "rank", "title", "author", "url", "tags", "likes", "favorites", "views", "comment", "create_time"]
    domain = "https://www.pixiv.net"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0"
    }
    cookies = {
        "Cookie": get_cookie(cookie_file)
    }
    # local proxy (e.g. Clash) used for both schemes
    proxies = {
        'http': 'http://127.0.0.1:7890/',
        'https': 'http://127.0.0.1:7890/'
    }

    for date in get_date_list(start_date):
        artworks_df = pd.DataFrame(columns=columns)
        today_rank_url = domain + f"/ranking.php?mode=daily&date={date}"
        # timeout so a stalled proxy/server cannot hang the scraper forever
        response = requests.get(url=today_rank_url, headers=headers,
                                cookies=cookies, proxies=proxies, timeout=30)
        response.encoding = "utf-8"

        res = etree.HTML(response.text)

        top50_artwork_urls = res.xpath('//a[starts-with(@href, "/artworks/") and @class="title"][position() <= 50]/@href')
        for rank, artwork_url in enumerate(top50_artwork_urls, start=1):
            url = domain + artwork_url
            # named artwork_id to avoid shadowing the builtin `id`
            artwork_id = artwork_url.split("/")[-1]

            response = requests.get(url=url, headers=headers, cookies=cookies,
                                    proxies=proxies, timeout=30)
            response.encoding = "utf-8"

            res = etree.HTML(response.text)

            # NOTE(review): assumes the <title> is space-separated with the
            # author at index 1 and the work title at index 3 (trailing 3
            # chars stripped) — fragile, verify against the live page format.
            page_title = res.xpath('/html/head/title/text()')[0]
            author = page_title.split(" ")[1]
            title = page_title.split(" ")[3][:-3]

            # the last <meta> content attribute carries the preload JSON
            content = json.loads(str(res.xpath('//meta/@content')[-1]))

            illust = content["illust"][artwork_id]
            tags = [t["tag"] for t in illust["tags"]["tags"]]
            likes = illust["likeCount"]
            favorites = illust["bookmarkCount"]
            views = illust["viewCount"]
            create_time = illust["createDate"]
            comment = illust["commentCount"]

            artwork_data = {
                "id": artwork_id,
                "date": date,
                "rank": rank,
                "title": title,
                "author": author,
                "url": url,
                # wrapped in a list so the DataFrame stores one row whose
                # "tags" cell is the whole tag list
                "tags": [tags],
                "likes": likes,
                "favorites": favorites,
                "views": views,
                "comment": comment,
                "create_time": create_time,
            }

            artworks_df = pd.concat([artworks_df, pd.DataFrame(artwork_data)], ignore_index=True, axis=0)
            print(f"{artwork_id}完成")
            # throttle: be polite to the server / avoid rate limiting
            time.sleep(2)

        print(artworks_df)

        # append one day's rows; write the header only on first creation
        artworks_df.to_csv(output_file, mode='a',
                           header=not os.path.exists(output_file), index=False)

    time.sleep(3)
    print("保存完成")


if __name__ == "__main__":
    # Script entry point: scrape the week of rankings ending at the
    # hard-coded date inside get_html().
    get_html()
