import threading
import time 
from . import crawler_360, crawler_baidu, crawler_sougou
import os
import time

# 运行多线程爬虫
def doCrawler(keyword, num_baidu, num_sougou, num_360, save_path):
    time0 = time.strftime('%Y-%m-%d-%H-%M-%S_', time.localtime(time.time()))
    time1 = time.time()

    print(num_baidu, num_sougou, num_360)

    # 如果指定的文件夹不存在就创建文件夹
    if not os.path.exists(save_path):
        os.mkdir(save_path)

    t1 = None
    t2 = None
    t3 = None

    # 子线程--百度爬虫
    if(num_baidu > 0):
        newT=[keyword, num_baidu, save_path, time0]
        t1 = threading.Thread(target=crawler_baidu.func,args=(newT,))
        t1.start()
        
    # 子线程--搜狗爬虫
    if(num_sougou > 0):
        newT=[keyword, num_sougou, save_path, time0]
        t2 = threading.Thread(target=crawler_sougou.func,args=(newT,))
        t2.start()
    
    # 子线程--360爬虫
    if(num_360 > 0):
        newT=[keyword, num_360, save_path, time0]
        t3 = threading.Thread(target=crawler_360.func,args=(newT,))
        t3.start()

    if(t1 != None):
        t1.join()
    if(t2 != None):
        t2.join()
    if(t3 != None):
        t3.join()

    time2 = time.time()
    return time2 - time1

if __name__ == '__main__':
    # Demo run: 50 Baidu + 20 Sougou images, 360 disabled, saved under ./images.
    doCrawler('奥特曼', 50, 20, 0, 'images')
