import requests
from threading import Thread, Semaphore
import os
from lxml import etree
import time


# 二级站点爬虫
# Second-level site spider: downloads one sub-category page in its own thread.
class CategoryListSpider(Thread):
    def __init__(self, title, son_dic):
        """Prepare a spider for a single sub-category.

        title:   name of the parent (first-level) category.
        son_dic: single-entry dict mapping the sub-category name to its URL.
        """
        super(CategoryListSpider, self).__init__()
        self.title = title
        # son_dic holds exactly one {name: url} pair; unpack it directly.
        # (The original looped over the dict, which silently kept only the
        # last entry if more than one was ever present.)
        self.son_title, self.href = next(iter(son_dic.items()))
        self.html = ''  # raw page bytes, filled in by get_post_list()

    def get_post_list(self):
        """Download the sub-category page and store its raw bytes."""
        # timeout keeps a stalled server from blocking this thread forever
        response = requests.get(self.href, timeout=10)
        self.html = response.content
        print('ok!1')

    def run(self):
        # Thread entry point: just fetch the page.
        self.get_post_list()


# 站点列表 ====> 分类新闻  ====> 具体的新闻  ======>内容
# Site directory spider: site list ====> category news ====> individual news ====> content
class GuideSpider(Thread):
    def __init__(self):
        super(GuideSpider, self).__init__()
        self.guideUrl = 'http://news.sina.com.cn/guide/'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36'
        }
        # {first-level title: [{sub-category name: url}, ...]}
        self.category_list = {}
        self.html = ''  # raw bytes of the guide page

    def get_category_list(self):
        """Download the site guide (directory) page."""
        # timeout keeps the thread from hanging on a stalled server
        response = requests.get(self.guideUrl, headers=self.headers, timeout=10)
        self.html = response.content

    def parse_category_list(self):
        """Parse self.html into self.category_list: {title: [{name: href}, ...]}."""
        text = etree.HTML(self.html)
        # First-level sections of the directory page
        for category in text.xpath('//*[@id="tab01"]/div'):
            try:
                title = category.xpath('./h3/a/text()')[0]
            except IndexError:
                # div without a heading (ad / filler block) — skip it.
                # The original bare `except Exception: pass` hid any error
                # and also aborted the rest of the section's items.
                continue
            self.category_list[title] = []
            for item in category.xpath('./ul/li/a'):
                try:
                    category_name = item.xpath('./text()')[0]
                    href = item.xpath('./@href')[0]
                except IndexError:
                    # malformed <a>; drop only this link, keep the rest
                    continue
                self.category_list[title].append({category_name: href})

    # Dispatch one child spider per sub-category.
    def allot_Spider(self):
        """Start a CategoryListSpider per sub-category, throttled in batches of 10."""
        sp_list = []
        num = 0
        for title in self.category_list:
            # makedirs also creates the parent 'data' dir (os.mkdir crashed
            # when it was missing) and tolerates re-runs without the racy
            # exists() pre-check.
            os.makedirs(f'data/{title}', exist_ok=True)
            for son_dic in self.category_list[title]:
                if num == 10:
                    # throttle: pause after every batch of 10 spiders
                    time.sleep(8)
                    num = 0
                # BUG FIX: the original put the spider start in an `else`,
                # so the son_dic that hit the batch boundary was silently
                # skipped — now every entry gets a spider.
                sp = CategoryListSpider(title, son_dic)
                sp.start()
                sp_list.append(sp)
                num += 1
        for sp in sp_list:
            sp.join()
        print('二级标题子爬虫分配完毕!')

    def run(self):
        # Thread entry point: fetch -> parse -> fan out child spiders.
        self.get_category_list()
        self.parse_category_list()
        self.allot_Spider()


if __name__ == '__main__':
    # Guard the entry point so importing this module does not immediately
    # start network traffic (the original ran on import).
    sp = GuideSpider()
    sp.start()
