import csv
import json
import time
import random
import itertools
import os

import request_send

from lxml import etree

# Spoof a mobile-browser User-Agent so the site serves the regular page.
headers = {
    "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Mobile Safari/537.36 Edg/122.0.0.0"
}

url = "https://www.kuakeba.com/"

# Download the homepage and parse it into an lxml element tree.
response = request_send.send_request(url, headers=headers)
tree = etree.HTML(response)

# Map of category name -> category page URL, scraped from the links
# inside the first sub-menu list on the page.
classify = {
    anchor.text: anchor.get("href")
    for anchor in tree.xpath('(//ul[@class="sub-menu"])[1]/li/a')
}

# Print the numbered category menu (1-based for the user).
keys = list(classify.keys())
for i, key in enumerate(keys):
    print(f"{i+1}.{key}")

# Keep asking until the user supplies a valid 1-based index.
# Bug fix: the original printed "数字错误！" on an out-of-range number but
# then fell through and still indexed keys[choice - 1], crashing with an
# IndexError (or silently picking the LAST category for choice == 0 via
# Python's negative indexing). A non-integer answer also raised an
# unhandled ValueError from int().
while True:
    try:
        choice = int(input("请输入你想要的资源分类的序号哦！："))
    except ValueError:
        print("数字错误！")
        continue
    if 1 <= choice <= len(keys):
        break
    print("数字错误！")

kind_link = classify[keys[choice - 1]]
print(f"你选择的是{keys[choice - 1]}，链接是{kind_link}")

# Selected category name, and the CSV file scraped rows are appended to.
category = keys[choice - 1]
csv_file = os.path.join(
    "resources", "kua_ke_data", "resources_data", f"{category}.csv"
)

# Fetch the resource page links.
#
# The links were cached to a JSON file (name -> URL) on a previous run so
# they can be read back here without re-crawling the listing pages.
json_file = os.path.join(
    "resources", "kua_ke_data", "resources_links", f"{keys[choice - 1]}.json"
)

with open(json_file, encoding="utf-8") as f:
    matches = json.load(f)

# Only the first cached entry is processed per run (the original variable
# was misleadingly called "first_10_items" while slicing [:1]); raise the
# slice limit to scrape more resources in one go.
items_to_scrape = list(matches.items())[:1]

for name, next_url in items_to_scrape:
    # Fetch the resource detail page and parse it.
    response = request_send.send_request(next_url, headers=headers)
    tree = etree.HTML(response)

    # Commas are swapped for full-width "，" so the values stay CSV-safe.
    title = "".join(tree.xpath('//h1[@class="article-title"]/a/text()')).replace(
        ",", "，"
    )

    # First link in the article's last paragraph — presumably the share /
    # download URL (verify against the live page markup).
    link = "".join(
        tree.xpath('//article[@class="article-content"]/p[last()]/a[1]/@href')
    )

    # Text of the element immediately following the "简介" (intro) heading,
    # flattened to one line and made CSV-safe.
    description = (
        tree.xpath(
            'string(//article[@class="article-content"]/h2[contains(text(),"简介")]/following-sibling::*[1])'
        )
        .replace("\n", " ")
        .replace(",", "，")
    )

    # Append the row, writing the header only when the file is new/empty.
    # Fix: the original built a second csv.writer and probed emptiness via
    # seek(0)/read(1) on an append-mode handle; checking the size up front
    # is simpler and equivalent, and a single writer is reused for both rows.
    write_header = not os.path.isfile(csv_file) or os.path.getsize(csv_file) == 0
    with open(csv_file, "a", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        if write_header:
            writer.writerow(["标题", "描述", "类别", "链接"])  # header row
        writer.writerow([title, description, category, link])  # data row

    # Random 2-3 second pause between requests to be polite to the server.
    time.sleep(random.uniform(2, 3))
print("数据已经全部保存完毕！")
