import threading

from concurrent_study.blog_spider import urls, craw
from concurrent_study.common import cal_timer


@cal_timer
def single_thread():
    """Crawl every URL sequentially on the main thread (timing baseline)."""
    for url in urls:
        craw(url)


# NOTE: completion order of the threads is nondeterministic.
@cal_timer
def multi_thread():
    """Crawl all URLs concurrently, one thread per URL, and wait for all."""
    workers = [threading.Thread(target=craw, args=(url,)) for url in urls]
    for worker in workers:
        worker.start()
    # Join separately so every thread is already running before we block.
    for worker in workers:
        worker.join()


if __name__ == "__main__":
    # Run the sequential baseline first, then the threaded variant,
    # so the @cal_timer output lets the two durations be compared.
    for benchmark in (single_thread, multi_thread):
        benchmark()
