from queue import Empty, Queue
from threading import Thread

import requests
from fake_useragent import UserAgent
from lxml import etree

# Crawler worker: downloads pages listed in url_queue.
class CrawlerInfo(Thread):
    """Thread that fetches each URL from *url_queue* and pushes the
    response body onto *html_queue* (also appended to a local dump file).

    Args:
        url_queue: queue.Queue of URL strings still to be fetched.
        html_queue: queue.Queue receiving the HTML text of each 200 response.
    """

    def __init__(self, url_queue, html_queue):
        # Bug fix: the original called Thread.__init__ without actually
        # inheriting from Thread, so start()/join() did not exist.
        super().__init__()
        self.url_queue = url_queue
        self.html_queue = html_queue

    def run(self):
        # A randomized User-Agent reduces the chance of being blocked.
        headers = {
            "User-Agent": UserAgent().random
        }
        while True:
            try:
                # Bug fix: empty()-then-get() races when several workers
                # share the queue — another thread can drain it between the
                # two calls, leaving get() blocked forever. get_nowait()
                # makes check-and-take atomic.
                url = self.url_queue.get_nowait()
            except Empty:
                break
            response = requests.get(url, headers=headers)
            if response.status_code == 200:
                self.html_queue.put(response.text)
                # Keep a raw copy of every fetched page for offline debugging.
                with open('懂球帝.txt', 'a', encoding='utf-8') as f:
                    f.write(response.text + '\n')


# Parser worker: extracts team names from the downloaded HTML.
class ParseInfo(Thread):
    """Thread that parses each HTML document from *html_queue* and prints
    up to the first three team names found.

    Args:
        html_queue: queue.Queue of HTML text documents to parse.
    """

    def __init__(self, html_queue):
        super().__init__()
        self.html_queue = html_queue

    def run(self):
        while True:
            try:
                # get_nowait() avoids the empty()/get() race present in the
                # original check-then-take pattern.
                html = self.html_queue.get_nowait()
            except Empty:
                break
            doc = etree.HTML(html)
            team_icon = doc.xpath('//span[@class="team-icon"]/b/text()')
            # Bug fix: indexing [0]..[2] raised IndexError whenever the page
            # yielded fewer than three matches; a slice is always safe and
            # prints the same text when three or more are present.
            print(" ".join(team_icon[:3]))


if __name__ == '__main__':
    # Queue of page URLs still to be fetched.
    url_queue = Queue()
    # Queue of downloaded HTML documents awaiting parsing.
    html_queue = Queue()

    base_url = 'https://www.dongqiudi.com/data/{}'
    for page in range(1, 5):
        url_queue.put(base_url.format(page))

    crawl_list = []
    for _ in range(3):
        # Build a crawler worker per iteration.
        crawler = CrawlerInfo(url_queue, html_queue)
        crawl_list.append(crawler)
        # NOTE(review): run() executes synchronously in the main thread —
        # no start() is called, so the workers do not actually run in
        # parallel here.
        crawler.run()

    print("五大联赛前三名排位：")
    parse = ParseInfo(html_queue)
    parse.run()
