# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
迷你定向抓取器主函数。

Authors: caopeirui(caopeirui@baidu.com)
Date:	2018/04/19 20:10:41
"""
import os
import time
import threading
import seedfile_load as sl
import crawl_thread as ct
from config_load import config_dict
from crawl_queue_manage import url_queue

if __name__ == "__main__":
    start_sec = time.time()

    # Create the directory that downloaded pages are saved into.
    # exist_ok=True avoids the check-then-create race (TOCTOU) of the
    # original os.path.exists() + os.makedirs() pair.
    output_directory = config_dict.get('output_directory')
    os.makedirs(output_directory, exist_ok=True)

    # Seed the shared crawl queue from the configured seed-URL file.
    # Each queue item is a (crawl_depth, url) tuple; seeds start at depth 0.
    initial_url_list = sl.get_url_list(config_dict.get('url_list_file'))
    for seed_url in initial_url_list:
        url_queue.put((0, seed_url), block=False)

    # Start the configured number of crawler worker threads; each worker
    # runs crawl_parse_save with its own numeric thread id.
    thread_count = int(config_dict.get('thread_count'))
    thread_list = []
    for thread_id in range(thread_count):
        worker = threading.Thread(target=ct.crawl_parse_save, args=(thread_id,))
        worker.start()
        thread_list.append(worker)

    # Wait for every worker to finish before reporting total run time.
    for worker in thread_list:
        worker.join()

    end_sec = time.time()
    print('cost_time: %s s' % (end_sec - start_sec))


