import request_send
import lxml.etree as etree
import json
import os
import time
import csv
import random


# Part 1: fetch the site's category list and let the user pick one
# category by its 1-based index.

url = "https://www.xmwav.com/"

headers = {
    "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Mobile Safari/537.36 Edg/122.0.0.0"
}

response = request_send.send_request(url, headers=headers)

tree = etree.HTML(response)

# Each <a> in the category <ul> carries the category name (text) and its link (href).
classify = tree.xpath('//ul[@class="clearfix zh"]//li/a')

classify_data = [(item.text, item.get("href")) for item in classify]

# Show a numbered menu so the user can pick by index.
for i, (text, href) in enumerate(classify_data, start=1):
    print(f"{i},{text},{href}")

choice = int(input("请输入你想要的资源分类的序号："))

if 1 <= choice <= len(classify_data):
    classify_link = classify_data[choice - 1][1]
    print(f"你选择的是:{classify_data[choice-1][0]}，链接是{classify_link}")

else:
    print("序号错误")
    # BUG FIX: the original fell through after printing the error, which
    # later raised NameError because classify_link was never assigned.
    # Abort the script on an invalid index instead.
    raise SystemExit(1)

# Part 2: build the per-category output paths.
# Page links are cached in a JSON file so later runs can reuse them;
# scraped rows accumulate in a CSV file with the same category name.

category_name = classify_data[choice - 1][0]

csv_file = os.path.join(
    "resources", "xiongmao_music_data", "data", f"{category_name}.csv"
)

json_file = os.path.join(
    "resources", "xiongmao_music_data", "links", f"{category_name}.json"
)

choice_mode = int(input("是否更新页面链接:1.是："))

if choice_mode == 1:
    # Make sure the cache file exists so the read below cannot fail.
    if not os.path.exists(json_file):
        os.makedirs(os.path.dirname(json_file), exist_ok=True)
        with open(json_file, "w", encoding="utf-8") as f:
            json.dump({}, f)

    with open(json_file, "r", encoding="utf-8") as f:
        matches = json.load(f)

    # The first key of the cached dict is the newest previously-seen title;
    # crawling stops as soon as it shows up again (incremental update).
    first_key = next(iter(matches)) if matches else None

    # Newly discovered title -> link pairs, kept in page order (newest first).
    new_matches = {}

    flag_find_link = False

    page_link = classify_link

    while page_link and not flag_find_link:
        response = request_send.send_request(page_link, headers=headers)
        tree = etree.HTML(response)

        # FIX: dropped the pointless f-string prefix (no placeholders).
        results = tree.xpath('//div[@class="list bgb "]/ul//a')

        next_page_link1 = None

        for a in results:
            title = a.get("title")
            link = a.get("href")
            if title == first_key:
                # Reached content that is already cached — stop paginating.
                flag_find_link = True
                break
            new_matches[title] = link

        if not flag_find_link:
            # The last pagination <li> holds the "next page" href (if any).
            next_page_link1 = tree.xpath('//ul[@class="pagination"]/li[last()]/a/@href')

        # Absolute URL of the next page, or None to end the loop.
        page_link = (
            "https://www.xmwav.com" + next_page_link1[0] if next_page_link1 else None
        )

    # Append the old cache after the new entries (dicts keep insertion order,
    # so the newest titles stay first).
    # FIX: the original called update(matches) twice and rebuilt json_file
    # with an identical value; it also assigned a next_urls list that was
    # always overwritten before use. All three redundancies are removed.
    new_matches.update(matches)

    with open(json_file, "w", encoding="utf-8") as f:
        json.dump(new_matches, f, ensure_ascii=False, indent=4)

# Part 3: visit every cached resource page and append its data to the CSV.

with open(json_file, "r", encoding="utf-8") as f:
    links = json.load(f)

# Change the slice start (e.g. [5:]) to resume from a given item after an
# interruption; [0:] processes everything.
links = dict(list(links.items())[0:])

next_urls = list(links.values())

# FIX: the original never created the CSV directory (only the JSON one),
# so the first write failed with FileNotFoundError on a fresh layout.
os.makedirs(os.path.dirname(csv_file), exist_ok=True)

for next_url in next_urls:
    # Fetch the resource page and extract the fields we store.
    response = request_send.send_request(next_url, headers=headers)

    tree = etree.HTML(response)

    title_1 = tree.xpath('//h1[@class="title"]/text()')

    title_2 = tree.xpath('//h1[@class="title"]/following-sibling::h2[1]/text()')

    # NOTE(review): title_1[0]/description_1[0] raise IndexError when the
    # page lacks these nodes — presumably every resource page has them;
    # confirm before relying on this for other categories.
    title = title_1[0] + title_2[0]

    # The last <a> in the info bar is the download/netdisk link.
    link = "".join(tree.xpath('//div[@class="info-zi mb15"]/a[last()]/@href'))

    description_1 = tree.xpath('//div[@class=" sx mb15"]/h2')

    description_2 = tree.xpath('//div[@class="lrc"]//article//text()')

    description = description_1[0].text + "".join(description_2)

    # Flatten line breaks so the description stays in one CSV cell.
    description = description.replace("\n", " ").replace("\r", " ")

    # Append one row; emit the header first when the file is still empty.
    with open(csv_file, "a+", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        f.seek(0)  # rewind to probe whether the file has content
        if not f.read(1):  # empty file -> write the header row
            writer.writerow(["标题", "链接", "描述"])
        f.seek(0, 2)  # back to the end for appending
        writer.writerow(
            [
                title,
                link,
                description,
            ]
        )

    # Throttle: random 10-11 s pause between requests to stay polite.
    # (The original comment claimed 2-3 s, but the code always slept 10-11.)
    time.sleep(random.uniform(10, 11))
print("数据已经全部保存完毕！")
