import sqlite3
import time

import urllib.request
import requests

import re
from lxml import etree

import sqlite3


class MenuParse:
    """Scraper for agedm.org.

    Harvests label names, listing-page URLs, cover images and per-anime
    detail rows, persisting them into the Django project's sqlite database
    (tables main_label, main_anime, main_anime_label, ...).
    """

    # One shared UA header block instead of four inline copies.
    HEADERS = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
    }

    # Path of the Django sqlite database, relative to this script's cwd.
    DB_PATH = "../mysite/db.sqlite3"

    # Markers scraped out of raw HTML / image URLs.
    _LABEL_RE = r'<a href="all-all-all-(.*?)-all-time-1" class="filter_list_item btn btn-sm ">'
    _VIEWS_RE = r'<div class="video_detail_extra_item flex-fill"><i class="bi-fire text-danger"></i>(.*?)</div>'
    _JPG_RE = r'([^/]+\.jpg)$'

    def get_label_data(self):
        """Fetch the catalog page and extract every label name.

        Returns:
            list[tuple[int, str]]: (id, title) pairs ready for
            save_label_data(); ids start at 101.
            NOTE(review): the 101 offset presumably keeps clear of
            pre-seeded label rows — confirm against the database.
        """
        url = "https://www.agedm.org/catalog/all-all-all-all-all-time-1"
        req = urllib.request.Request(url, headers=self.HEADERS)
        with urllib.request.urlopen(req) as response:
            content = response.read().decode()
        items = re.findall(self._LABEL_RE, content)
        return [(index, item) for index, item in enumerate(items, start=101)]

    def save_label_data(self, datas):
        """Bulk-insert (id, title) rows into main_label.

        Args:
            datas: iterable of (id, title) tuples, e.g. from get_label_data().
        """
        con = sqlite3.connect(self.DB_PATH)
        try:
            # Connection.executemany opens an implicit cursor.
            con.executemany("insert into main_label (id, title) values (?, ?)", datas)
            con.commit()
        finally:
            # Close even if the insert raises (the original leaked here).
            con.close()

    def get_list_data(self):
        """Return the detail-page URLs found on the recommend listing page."""
        res = requests.get("https://www.agedm.org/recommend/2", headers=self.HEADERS)
        tree = etree.HTML(res.content.decode("utf8"))
        return [item.attrib["href"] for item in tree.cssselect(".py-2 .stretched-link")]

    def down_detail_data(self, url):
        """Download one detail page's cover image into ./image/.

        Silently returns when the page has no cover or the cover URL does
        not end in ".jpg".
        """
        # Fix: the page fetch previously sent no UA header (unlike every
        # other request in this class).
        res = requests.get(url, headers=self.HEADERS)
        tree = etree.HTML(res.content.decode("utf8"))
        imgs = tree.cssselect(".video_detail_cover img")
        if not imgs:
            return
        img_src = imgs[0].attrib["data-original"]
        print(img_src)
        match = re.search(self._JPG_RE, img_src)
        if match is None:
            # Non-.jpg cover: the original crashed on .group() here.
            return
        img_name = match.group()
        print(img_name)
        img_res = requests.get(img_src, headers=self.HEADERS)
        with open(f"./image/{img_name}", "wb") as f:
            f.write(img_res.content)

    def get_and_save_detail_data(self, url):
        """Scrape one detail page and insert an anime row plus its label links.

        Skips the page (no DB writes) when the cover image, category,
        status or view counter cannot be resolved.
        """
        # Fix: the page was previously downloaded twice (requests AND
        # urllib) and the two copies parsed inconsistently; one fetch now
        # feeds both the lxml tree and the regex scan.
        res = requests.get(url, headers=self.HEADERS)
        content = res.content.decode("utf8")
        tree = etree.HTML(content)

        imgs = tree.cssselect(".video_detail_cover img")
        if not imgs:
            return
        img_src = imgs[0].attrib["data-original"]
        match = re.search(self._JPG_RE, img_src)
        if match is None:
            return
        img_name = match.group()          # e.g. "foo.jpg"
        age_name = match.group(1)[:-4]    # file name without the ".jpg" suffix

        title = tree.cssselect(".flex-grow-1 .video_detail_title")[0].text
        introduce = tree.cssselect(".flex-grow-1 .py-2")[0].text
        views = re.findall(self._VIEWS_RE, content)
        episode = len(tree.cssselect(".video_detail_episode .video_detail_spisode_link"))

        # Positional mapping of the "detail information" list.
        # NOTE(review): index meanings inferred from usage — confirm
        # against the live page markup.
        messages = tree.cssselect(".detail_imform_list .detail_imform_value")
        pub_date = messages[6].text
        category = messages[1].text
        labels = (messages[9].text or "").split()   # whitespace-separated label names
        author = messages[4].text
        status = messages[7].text
        link = messages[10].text

        con = sqlite3.connect(self.DB_PATH)
        try:
            cur = con.cursor()

            # Resolve each label name to its main_label id; unknown names
            # are dropped.
            label_ids = []
            for label in labels:
                cur.execute("select id from main_label where title = ?", (label,))
                row = cur.fetchone()
                if row:
                    label_ids.append(row[0])

            cur.execute("select id from main_category where title = ?", (category,))
            cate = cur.fetchone()
            cur.execute("select id from main_status where title = ?", (status,))
            statu = cur.fetchone()
            if cate is None or statu is None or not views:
                # Unknown category/status or missing view counter: the
                # original crashed on cate[0]/statu[0]/views[0] here.
                return

            cur.execute(
                "insert into main_anime (title, imglink, agelink, introduce, views, episode, pub_date, category_id, author, status_id, link ) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                (title, img_name, age_name, introduce, views[0], episode,
                 pub_date, cate[0], author, statu[0], link),
            )
            # Fix: use the rowid of the row just inserted instead of
            # re-selecting by title, which could match an older duplicate.
            anime_id = cur.lastrowid

            cur.executemany(
                "insert into main_anime_label (anime_id, label_id) values (?, ?)",
                [(anime_id, label_id) for label_id in label_ids],
            )
            con.commit()
            print("数据上传成功")
        finally:
            cur.close()
            con.close()



def main():
    """Manual driver: enable one scraping phase at a time.

    Wrapped in a __main__ guard so importing this module no longer fires
    a network request and a DB write as a side effect.
    """
    mp = MenuParse()

    # Phase 1 — harvest the label table once:
    # mp.save_label_data(mp.get_label_data())

    # Phase 2 — crawl every detail page from the recommend listing:
    # for url in mp.get_list_data():
    #     mp.get_and_save_detail_data(url)
    #     time.sleep(10)  # throttle: be polite to the server

    # One-off: scrape a single detail page.
    mp.get_and_save_detail_data("https://www.agedm.org/detail/20240118")


if __name__ == "__main__":
    main()