import re
import time
import sqlite3

import scrapy


class VideoSpider(scrapy.Spider):
    """Crawl video listings from 6cd89.com and persist metadata to SQLite.

    Flow: ``parse`` (home page, resolves category menu links) ->
    ``parse2`` (category listing pages, with pagination) ->
    ``parse3`` (detail page, extracts the download URL) -> ``save_data``.

    Duplicate detection is host-agnostic: the site rotates mirror hosts, so
    stored URLs are matched with the host replaced by a SQL ``LIKE`` wildcard.
    """
    name = 'video'
    allowed_domains = ['6cd89.com']
    start_urls = ['https://www.6cd89.com/index/home.html']

    # Matches the host portion of a page URL (e.g. "www.xxx.com"); compiled
    # once instead of per duplicate check. Non-greedy so only the host matches.
    _HOST_RE = re.compile(r"www\..*?\.com")
    # First capture group is the page number in listing URLs like ".../xx-3.html".
    _PAGE_NO_RE = re.compile(r".*-(\d+)\.html")

    def __init__(self, *args, **kwargs):
        # Forward *args/**kwargs so Scrapy can inject name/settings as usual.
        super().__init__(*args, **kwargs)
        # When True, keep paginating even after hitting already-saved pages
        # (full re-crawl); when False, stop at the first known page.
        self.full_page_crawl = False
        # Category labels as they appear in the site menu; runtime data,
        # must match the menu text exactly.
        self.categories = [
            "日本AV", "软萌福利姬", "动漫电影", "OnlyFans", "探花主播",
            "麻豆专区", "JVID", "SWAG", "人妖伪娘", "蜜桃精东", "黑料头条", "短视频"
        ]
        # NOTE(review): assumes table `information` already exists in DB.db —
        # no CREATE TABLE is visible here; verify against the schema setup.
        self.conn = sqlite3.connect("DB.db")
        self.cur = self.conn.cursor()

    def closed(self, reason):
        """Scrapy hook: release the SQLite connection when the spider stops."""
        self.conn.close()

    def parse(self, response, **kwargs):
        """Resolve each configured category to its menu link and follow it."""
        for category in self.categories:
            category_url = response.xpath(
                f'//section[@id="section-menu"]//a[text()="{category}"]/@href').get()
            # Skip categories missing from the menu; urljoin(None) would raise.
            if category_url:
                yield scrapy.Request(response.urljoin(category_url), callback=self.parse2)

    def parse2(self, response):
        """Parse one listing page: yield detail requests, then paginate."""
        category = response.xpath('//span[@class="cat_pos_l"]/a[3]/text()').get()
        last_page_text = response.xpath('//a[@class="hidden-xs"][last()]/text()').get()
        # Fall back to a single page when the pagination widget is absent,
        # instead of crashing on int(None).
        last_page = int(last_page_text) if last_page_text else 1
        has_crawl_page = False
        for page in response.xpath('//div[@id="tpl-img-content"]/li/a'):
            page_url = response.urljoin(page.xpath('./@href').get())
            if self.is_exist_page_url(page_url):
                # Already stored; remember so incremental mode can stop early.
                has_crawl_page = True
                continue
            data = {
                "title": page.xpath('./@title').get(),
                "page_url": page_url,
                "image_url": page.xpath('./img/@data-original').get(),
                # Guard a missing <span>: None.strip() would raise.
                "release_date": (page.xpath('./span/text()').get() or "").strip(),
                "crawl_date": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                "category": category
            }
            yield scrapy.Request(page_url, meta=data, callback=self.parse3)
        # Incremental mode: stop paginating once a known page was seen.
        if has_crawl_page and not self.full_page_crawl:
            return
        match = self._PAGE_NO_RE.match(response.url)
        if match:
            page_number = int(match.group(1))
            if page_number < last_page:
                next_page_url = re.sub(r"\d+\.html", f"{page_number + 1}.html", response.url)
                yield scrapy.Request(next_page_url, callback=self.parse2)
        else:
            # The first listing page carries no page number; page 2 is
            # "<url minus '.html'>-2.html".
            next_page_url = response.url[:-5] + "-2.html"
            yield scrapy.Request(next_page_url, callback=self.parse2)

    def parse3(self, response):
        """Extract the download URL from a detail page and persist the record."""
        download_url = response.xpath('//tr[@class="app_hide"]//input/@value').get()
        data = {
            "title": response.meta["title"],
            "page_url": response.meta["page_url"],
            "image_url": response.meta["image_url"],
            "release_date": response.meta["release_date"],
            "crawl_date": response.meta["crawl_date"],
            "category": response.meta["category"],
            "download_url": download_url
        }
        self.save_data(data)

    def is_exist_page_url(self, page_url):
        """Return True if `page_url` (host-agnostically) is already stored.

        The host is replaced with a LIKE wildcard so the same page reached via
        a mirror domain still counts as seen. The LIKE value is bound as a
        parameter — the URL comes from a crawled page and must never be
        interpolated into SQL text.
        """
        like_url = self._HOST_RE.sub("%", page_url)
        self.cur.execute(
            "select * from information where page_url like ?", (like_url,))
        return self.cur.fetchone() is not None

    def save_data(self, data):
        """Insert one record into `information` and commit.

        Column names come from the internal `data` dict built by parse2/parse3
        (never from crawled content), so interpolating them is safe; the
        values themselves are bound as parameters.
        """
        keys = ",".join(data.keys())
        value_format = ",".join(["?"] * len(data))
        sql = f"insert into information ({keys}) values ({value_format})"
        # Single row -> execute, not executemany.
        self.cur.execute(sql, tuple(data.values()))
        self.conn.commit()
        self.logger.info("保存成功: %s", data.get("title"))