from imports import *
import threading,time, queue

class crawl_thread(threading.Thread):
    """Crawler thread: pops place names from a shared task queue, fetches
    each place's Ctrip page, extracts the image-container HTML fragments
    and pushes [placeName, fragments] onto the shared ``dataQueue`` for
    the parser threads to consume."""

    def __init__(self, threadID, tasks):
        threading.Thread.__init__(self)
        self.threadID = threadID    # numeric id, used only in log messages
        self.tasks = tasks          # queue.Queue of place names to crawl

    def run(self):
        self.crawl_spider()

    def crawl_spider(self):
        """Drain the task queue; exit when it is empty."""
        while True:
            # get_nowait() fixes the check-then-get race of the original
            # `if empty(): break else get()`: with several crawler threads
            # sharing the queue, another thread could consume the last item
            # between the emptiness check and the blocking get(), leaving
            # this thread blocked forever.
            try:
                placeName = self.tasks.get_nowait()
            except queue.Empty:
                break
            placeId = placesId[indexId[placeName]]

            url = "https://you.ctrip.com/place/" + placeId + '.html'
            print('\n正在执行爬虫线程[ ' + str(self.threadID) + ' ]...' + 'target -> ' + url)
            # Fetch the page and soup-parse the HTML
            html = bs(rq.get(url=url, headers=Headers).text, 'html.parser')
            # Keep only the image-container elements, as raw HTML strings
            html = [str(x) for x in html.select('.imgbackgbox')]
            print('\n爬虫线程[ ' + str(self.threadID) +' ]结束...爬取成功!' )
            dataQueue.put([placeName, html])


class parser_thread(threading.Thread):
    """Parser thread: pulls [placeName, html_fragments] pairs from its
    queue and stores the extracted image URLs in the shared module-level
    ``imagesLink`` dict. Loops until the module-level ``flag`` becomes
    True."""

    def __init__(self, threadID, tasks):
        threading.Thread.__init__(self)
        self.threadID = threadID    # numeric id, used only in log messages
        self.tasks = tasks          # queue.Queue fed by the crawler threads

    def run(self):
        while not flag:
            # Non-blocking get so the stop flag is re-checked promptly.
            # Catching queue.Empty (instead of the original bare
            # `except Exception: pass`) no longer hides real parse errors,
            # and the dead `if not data: pass` no-op is removed.
            try:
                data = self.tasks.get(False)
            except queue.Empty:
                continue
            try:
                self.parse_data(data)
            except Exception as e:
                # Best-effort: a malformed page must not kill the thread.
                print('parser_thread[%s] parse error: %r' % (self.threadID, e))
            finally:
                self.tasks.task_done()   # always balance the successful get()

    def parse_data(self, data):
        """Extract every src="..." URL from the HTML fragments in data[1]
        and record the list under key data[0] in the global imagesLink."""
        global imagesLink
        print('\n正在执行解析线程[ ' + str(self.threadID) + ' ]...' + 'target -> ' + data[0])
        result = re.findall('src="([^"]+)"', ''.join(data[1]))
        imagesLink[data[0]] = result        # save the result
        print('\n解析线程[ ' + str(self.threadID) + ' ]结束...解析完毕!')

# Shared state for the crawl/parse pipeline.
imagesLink = {}             # placeName -> list of image URLs (filled by parser threads)
flag = False                # set True by work() to tell parser threads to exit
dataQueue = queue.Queue()   # crawl_thread producers -> parser_thread consumers
def work():
    """Run the whole crawl/parse pipeline.

    Fills a task queue from the module-level ``area`` iterable, fans out
    5 crawler threads and 5 parser threads, waits for both stages to
    drain, then signals the parsers to exit via the global ``flag``.
    """
    global flag
    # 1. Build the task queue. Unbounded on purpose: the original
    #    queue.Queue(20) deadlocks right here whenever len(area) > 20,
    #    because every put() happens before any consumer thread starts.
    tasksQueue = queue.Queue()
    for task in area:
        tasksQueue.put(task)

    # 2. Start 5 crawler threads
    crawl_threads_list = []
    for thread_id in range(5):
        thread = crawl_thread(thread_id, tasksQueue)
        thread.start()
        crawl_threads_list.append(thread)

    # 3. Start 5 parser threads
    parser_threads_list = []
    for thread_id in range(5):
        thread = parser_thread(thread_id, dataQueue)
        thread.start()
        parser_threads_list.append(thread)

    # 4. Wait for the crawl stage to drain its queue. Sleep instead of
    #    the original `pass` hot-spin, which burned a full CPU core.
    while not tasksQueue.empty():
        time.sleep(0.1)
    # 5. Wait for every crawler thread to finish
    for t in crawl_threads_list:
        t.join()
    # 6. Wait until every crawled page has been taken by a parser
    while not dataQueue.empty():
        time.sleep(0.1)
    # 7. Signal the parser threads to exit their run() loops
    flag = True
    # 8. Wait for every parser thread to finish
    for t in parser_threads_list:
        t.join()
    print('Over!')