# 导入运行爬虫类
import json
from scrapy.crawler import CrawlerProcess
# 导入项目配置获取函数
from scrapy.utils.project import get_project_settings
# 导入需要运行的爬虫类
from video_scrapy.spiders import video_spider,details_spider
import os

# 视频处理函数
# Video assembly function: merge downloaded .ts segments into one file
def ts_syn():
    """Concatenate every downloaded .ts segment under ./video into a single
    '视频.ts' file, deleting each segment after it has been appended.

    Expected layout: ./video/<title>/<episode>/<segment>.ts

    NOTE(review): all titles and episodes are appended into ONE shared
    output file — confirm this is intended rather than one file per episode.
    """
    # ./video relative to the current working directory
    video_path = os.path.join(os.path.abspath('.'), 'video')
    output_path = os.path.join(video_path, '视频.ts')
    for title in os.listdir(video_path):
        title_path = os.path.join(video_path, title)
        # BUG FIX: the output file lives inside video_path, so a later run
        # would hit it here and os.listdir() on a file raises
        # NotADirectoryError — skip anything that is not a directory.
        if not os.path.isdir(title_path):
            continue
        for episode in os.listdir(title_path):
            episode_path = os.path.join(title_path, episode)
            # BUG FIX: os.listdir order is arbitrary; sort so segments are
            # concatenated deterministically (and the original inner loop
            # reused the outer variable `i`, clobbering the title name).
            segment_paths = sorted(
                os.path.join(episode_path, segment_name)
                for segment_name in os.listdir(episode_path)
            )
            # Append each segment to the output, then remove the segment.
            with open(output_path, 'ab') as out_file:
                for segment_path in segment_paths:
                    with open(segment_path, 'rb') as segment_file:
                        out_file.write(segment_file.read())
                    os.remove(segment_path)

"""
page_size
model
search_get
"""
# 视频json爬虫运行函数
def run_details(page_size,model,search_get):
    settings = get_project_settings()
    settings['ITEM_PIPELINES'] = {
        'video_scrapy.pipelines.DetailsPipeline': 300,
    }

    process = CrawlerProcess(settings=settings) # 导入项目配置
    process.crawl(details_spider.DetailsSpiderSpider,page_size,model,search_get)
    print('爬虫启动')
    process.start()                    # 启动爬虫

# json文件获取函数
def get_json():
    json_path = os.path.abspath('..')
    json_path = os.path.join(json_path,"data_json")
    json_list = []
    for i in os.listdir(json_path):
        for t in  os.listdir(os.path.join(json_path,i)):
            json_path_1 = os.path.join(os.path.join(json_path,i),t)
            with open(json_path_1,"r",encoding="utf-8") as json_file:
                data = json.load(json_file)
            json_list.append(data)

    return json_list

"""
json_list
r_url
"""
# ts视频获取爬虫
def run_video(json_list,r_url):
    settings = get_project_settings()
    settings['ITEM_PIPELINES'] = {
        "video_scrapy.pipelines.VideoScrapyPipeline": 300,
    }
    process = CrawlerProcess(settings=settings)
    process.crawl(video_spider.VideoSpiderSpider,json_list,r_url)
    print('爬虫启动')
    process.start()  # 启动爬虫


if __name__ == '__main__':
    # BUG FIX: the original call run_details(4, 1) omitted the required
    # third argument (search_get) and raised TypeError at startup.
    # NOTE(review): None is a placeholder — confirm the correct default
    # search value against DetailsSpiderSpider.
    run_details(4, 1, None)