import requests
from lxml import etree
from fake_useragent import UserAgent
import re

class MzbSpider:
    """Scraper for the latest monthly administrative-division codes
    published by the Ministry of Civil Affairs (www.mca.gov.cn)."""

    def __init__(self):
        # Index page listing the monthly publications for 2020
        self.index_url = 'http://www.mca.gov.cn/article/sj/xzqh/2020/'
        # Randomized User-Agent to reduce the chance of being blocked
        self.headers = {'User-Agent': UserAgent(path='fake_useragent.json').random}

    def get_html(self, url):
        """Fetch *url* and return the response body as text.

        Raises requests.HTTPError on a non-2xx status and
        requests.Timeout if the server does not respond within 10s
        (previously the request could hang forever with no timeout).
        """
        response = requests.get(url=url, headers=self.headers, timeout=10)
        response.raise_for_status()
        return response.text

    def xfunc(self, html, x):
        """Parse *html* with lxml and return the list of matches for the
        XPath expression *x* (an empty list when nothing matches)."""
        eobj = etree.HTML(html)
        return eobj.xpath(x)

    def parse_html(self):
        """Entry point: locate the newest month's link on the index page
        and hand it to get_data()."""
        index_html = self.get_html(url=self.index_url)
        # The first row of the listing table holds the most recent month
        x = '//table/tr[1]/td[@class="arlisttd"]/a/@href'
        href_list = self.xfunc(index_html, x)
        if href_list:
            # Follow the second-level page for the newest month
            self.get_data(href_list[0])
        else:
            print('最新月份连接未提取成功')

    def get_data(self, href):
        """Fetch the second-level page and extract the real data URL.

        The second-level page is only a JavaScript redirect of the form
            <script>
                window.location.href="http://www.mca.gov.cn/article/sj/xzqh/2020/2020/202101041104.html";
            </script>
        so the real link is pulled out with a regex.
        """
        two_url = 'http://www.mca.gov.cn' + href
        two_html = self.get_html(url=two_url)
        # Raw string with escaped dots: match the literal redirect
        # statement only (the original unescaped '.' matched any char)
        regex = r'window\.location\.href="(.*?)"'
        real_url_list = re.findall(regex, two_html, re.S)
        if real_url_list:
            # Scrape the actual data from the resolved page
            self.get_real_data(real_url_list[0])
        else:
            print('未提取到真实链接')

    def get_real_data(self, real_url):
        """Download the data page and print one {'name', 'code'} dict
        per administrative-division row."""
        real_html = self.get_html(real_url)
        # Data rows are the <tr height="19"> elements of the data table
        two_x = '//tr[@height="19"]'
        tr_list = self.xfunc(real_html, two_x)
        for tr in tr_list:
            # td[2] holds the division code, td[3] the division name
            code_list = tr.xpath('./td[2]//text()')
            name_list = tr.xpath('./td[3]/text()')
            if name_list and code_list:
                item = {'name': name_list[0], 'code': code_list[0]}
                print(item)
            else:
                print('具体数据提取失败')


if __name__ == '__main__':
    # Build the spider and kick off the scraping pipeline.
    MzbSpider().parse_html()




















