# import scrapy
#
# from ScrapyObject.spiders.utils.url_utils import *
#
# '''
# scrapy crawl ydap -o ydap.json
# https://77maott.com/index.html
# https://t33.cdn2020.com/video/m3u8/2025/09/20/65ea276d/index.m3u8
# '''
#http://7849ck.cc/
#
# class YdapSpider(scrapy.Spider):
#     name = "ydap"
#     # URL scheme prefix
#     prefix = 'https://'
#     # domain name (middle segment of the URL)
#     website = '77maott'
#     # top-level-domain suffix plus trailing slash
#     suffix = '.com/'
#     allowed_domains = [website + '.com']
#     start_urls = [prefix + website + suffix + "index.html"]
#
#     def __init__(self):
#         self.i = 0
#
#     def parse(self, response):
#         content = get_data(response)
#         video_list = get_video_url_one(content)
#         tag_list = response.xpath("//h5[@class='tags h6-md']/a/text()").extract()
#         name_list = response.xpath("//div[@class='header-left']/h4/text()").extract()
#         if len(video_list) and len(tag_list) and len(name_list):
#             self.i = self.i + 1
#             yield get_video_item(id=self.i, tags=tag_list[0], url=response.url, name=name_list[0], pUrl='', vUrl=format_url_one(video_list[0]))
#         url_list = response.xpath("//div[@class='img-box cover-md']/a/@href").extract()
#         picture_list = response.xpath("//div[@class='img-box cover-md']/a/img/@data-original").extract()
#         if len(url_list) and len(picture_list):
#             for index, value in enumerate(url_list):
#                 self.i = self.i + 1
#                 yield get_video_item(id=self.i, url=split_joint(self.prefix + self.website + self.suffix, url_list[index]), pUrl=picture_list[index])
#         # extract URL links from the page content
#         url_list = get_url(content)
#         # follow each extracted URL
#         for url in url_list:
#             if url.startswith('/') or url.startswith('.html'):  # BUG(review): startswith('.html') can never match a relative page path — likely meant url.endswith('.html')
#                 yield scrapy.Request(split_joint(self.prefix + self.website + self.suffix, url), callback=self.parse)
#             elif url.startswith('http') or url.startswith('www'):
#                 yield scrapy.Request(url, callback=self.parse)
