import json
import os

import request_send

from lxml import etree


# URL of the first weekly issue (issue 001).  The scraping loop below
# overwrites `urls` with whichever issue the user asks for next.
urls = "https://weekly.howie6879.com/2021/08-16~08-20.%E8%80%81%E8%83%A1%E7%9A%84%E5%91%A8%E5%88%8A%EF%BC%88%E7%AC%AC001%E6%9C%9F%EF%BC%89.html"

# Links to every weekly issue, harvested from the sidebar navigation.
weekly_links = []

# Request headers (mobile user agent).
headers = {
    "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Mobile Safari/537.36 Edg/122.0.0.0"
}

response = request_send.send_request(urls, headers)

# Parse the fetched HTML.
tree = etree.HTML(response)

# Pull every issue link out of the sidebar navigation list.
sidebars = tree.xpath(
    '//div[@class="md-sidebar__inner"]//ul[@class="md-nav__list"]//a/@href'
)

# Keep only percent-encoded hrefs — those are the actual issue pages.
weekly_links = [sidebar for sidebar in sidebars if "%" in sidebar]

# Expand relative links ("../2021/...") into absolute URLs.  Only the
# leading ".." is stripped; str.replace("..", ...) would also clobber any
# ".." appearing later in the path.
weekly_links = [
    "https://weekly.howie6879.com/" + link[2:] if link.startswith("..") else link
    for link in weekly_links
]

# Links to 2021 issues may be bare file names; pad them into full URLs.
# The first link is known-complete, so its length is the reference.
first_link_len = len(weekly_links[0]) if weekly_links else 0

# NOTE: camelCase name kept — the interactive scraping loop below reads it.
weeklyLinks = [
    (
        link
        if len(link) == first_link_len
        else "https://weekly.howie6879.com//2021/" + link
    )
    for link in weekly_links
]


while True:
    url = urls

    # Request headers (mobile user agent), same as the bootstrap request.
    headers = {
        "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Mobile Safari/537.36 Edg/122.0.0.0"
    }

    # Fetch the issue page.
    response = request_send.send_request(url, headers)

    # Parse the HTML.
    tree = etree.HTML(response)

    # The <h1> text is the issue title; it doubles as the output file name.
    title = tree.xpath("//h1/text()")[0]
    filename = title + ".json"

    # Walk every h2, h3, p and ul tag in document order.
    tags = tree.xpath("//h2 | //h3 | //p | //ul")

    # Category currently being filled, set by the most recent <h2>.
    current_category = None

    # Parsed data, keyed by category:
    # {'category': [{'name': ..., 'description': ..., 'image': ..., 'link': ...}, ...]}
    items = {}

    for tag in tags:
        if tag.tag == "h2":
            # itertext() yields the text of the tag and all of its children.
            current_category = "".join(tag.itertext())
            items[current_category] = []
        elif current_category is None:
            # Content before the first <h2> belongs to no category; skip it.
            continue
        elif tag.tag == "h3":
            # An <h3> opens a new item inside the current category.
            item = {
                "name": "".join(tag.itertext()),
                "description": "",
                "image": None,
                "link": None,
            }
            link = tag.xpath(".//a/@href")
            if link:
                item["link"] = link[0]
            items[current_category].append(item)
        elif tag.tag == "p":
            # Paragraphs extend the latest item.  The image lookup is kept
            # inside the non-empty guard too — the original code could hit
            # IndexError on a categorised <p><img> with no preceding <h3>.
            if items[current_category]:
                items[current_category][-1]["description"] += "".join(tag.itertext())
                image = tag.xpath(".//img/@src")
                if image:
                    items[current_category][-1]["image"] = image[0]
        elif tag.tag == "ul":
            li_texts = tag.xpath(".//li/text()")
            if li_texts and items[current_category]:
                items[current_category][-1]["li_texts"] = li_texts

    # Drop the boilerplate "about" section if present.
    items.pop("✍️ 说明", None)

    # Persist the parsed issue as JSON under resources/weekly_data/.
    try:
        # exist_ok avoids the check-then-create race of os.path.exists().
        os.makedirs(os.path.join("resources", "weekly_data"), exist_ok=True)
        with open(
            os.path.join("resources", "weekly_data", filename), "w", encoding="utf-8"
        ) as f:
            json.dump(items, f, ensure_ascii=False, indent=4)
        print("(unknown)保存成功！")
    except OSError:
        # Best-effort save: a title with filesystem-hostile characters
        # should not kill the whole session.
        print("(unknown)保存失败！")

    ans = input("哥们还想爬吗？(y/n):")
    if ans.lower() != "y":
        print("拜拜！")
        break

    date1 = input("请输入你想爬取的期数:")
    try:
        issue_no = int(date1)
    except ValueError:
        # Non-numeric input used to crash the script; treat it as "not found".
        print("没有找到对应的期数，可能还没出哦！")
        continue
    # Issue numbers are zero-padded to three digits in the URL, directly
    # after "第" (whose UTF-8 percent-encoding ends in %AC) and before the
    # %-encoded "期" — hence the "%AC<nnn>%" needle.
    date2 = "%AC" + "{:03}".format(issue_no) + "%"
    for link in weeklyLinks:
        if date2 in link:
            urls = link
            break
    else:
        print("没有找到对应的期数，可能还没出哦！")
        continue
