# -*- coding: utf-8 -*-
"""
程序入口
"""
import MyQueue
import Crawl
import threading
from os import path, makedirs


def thread_func(type, my_queue, savepath):
    """Worker thread body: drain pages from the shared queue and crawl each.

    Args:
        type: content category passed through to Crawl (e.g. 'ooxx' or 'pic').
              NOTE(review): shadows the builtin `type`; kept for interface
              compatibility with existing callers.
        my_queue: shared queue of page numbers; `pop()` is expected to return
                  a falsy value when exhausted — TODO confirm against MyQueue.
        savepath: directory where crawled content is saved.

    Exits when the queue yields a falsy page.
    """
    page = my_queue.pop()
    while page:
        try:
            # Fluent pipeline: fetch page -> parse HTML -> save results.
            Crawl.Crawl(type, page, savepath).http_get().html_parse().save()
        except Exception as e:
            # Best-effort: report the failure and move on to the next page.
            print(e)
        # BUGFIX: advance to the next page. The original never re-popped,
        # so the worker crawled the same page in an infinite loop.
        page = my_queue.pop()


# Prompt text and converter for each supported input name.
# Prompts are user-facing and must stay byte-identical to the originals.
_INPUT_SPECS = {
    'thread_count': ("请输入要调用线程个数:\n", int),
    'content': ("请输入要下载内容（ooxx 或者 pic）:\n", str),
    'start_page': ("请输入下载起始页码:\n", int),
    'end_page': ("请输入结束页码:\n", int),
}


def collect_input(name):
    """Prompt the user until a truthy, well-formed value is entered.

    Args:
        name: one of 'thread_count', 'content', 'start_page', 'end_page'.

    Returns:
        The entered value — an int for the numeric inputs, str for 'content'.
        Falsy entries (empty string, 0) are rejected and re-prompted.

    Raises:
        ValueError: if `name` is not a known input. The original left
        `result` unbound here, crashing with an uncaught NameError.
    """
    try:
        prompt, convert = _INPUT_SPECS[name]
    except KeyError:
        raise ValueError('unknown input name: %r' % name)
    while True:
        try:
            result = convert(input(prompt))
        except ValueError:
            # Non-numeric text for an int field: ask again.
            continue
        if result:
            return result


if __name__ == '__main__':
    # Collect run parameters interactively.
    content_type = collect_input('content')
    savepath = './pic/' + content_type + '/'
    # Make sure the destination directory exists before any thread writes.
    if not path.exists(savepath):
        makedirs(savepath)

    start_page = collect_input('start_page')
    end_page = collect_input('end_page')
    thread_count = collect_input('thread_count')

    # One shared queue of page numbers; spawn the requested number of workers.
    my_queue = MyQueue.MyQueue(start_page, end_page)
    for _ in range(thread_count):
        threading.Thread(
            target=thread_func,
            args=(content_type, my_queue, savepath),
        ).start()
