# -*- coding: utf-8 -*-
# @Time    : 2019/2/20 15:15
# @Author  : yueconger
# @File    : spider_gov_chinalaw.py
# Data source: Ministry of Justice of the PRC (中国司法部, www.chinalaw.gov.cn)
import requests
from lxml import etree
import json
import time
import os
import re


class Spider(object):
    """Scraper for law documents published by the Ministry of Justice of China.

    Reads previously saved JSON index files (one per topic/module), then
    downloads and stores the HTML page for every law entry listed in them.
    """

    def __init__(self):
        # Base site URL; JSON records carry relative paths appended to this.
        self.url = 'http://www.chinalaw.gov.cn'
        # Browser-like headers so the site serves normal pages.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Host': 'www.chinalaw.gov.cn',
            'Referer': 'http://www.chinalaw.gov.cn',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
        }

    def json_read(self):
        """Walk the local JSON index directory and download every listed page.

        Each ``*.json`` file under ``file_path`` is expected to contain a list
        of records with an ``'infostaticurl'`` key (site-relative URL) and a
        ``'listtitle'`` key (law name) — see the access pattern below.
        Downloaded HTML is saved under ``html_path/<module>/<law name>.html``.
        """
        file_path = r'E:\LocalServer\chinacourt\司法部法律信息\json/'
        html_path = r'E:\LocalServer\chinacourt\司法部法律信息\html'
        for file_name in os.listdir(file_path):  # list all index files
            json_path = os.path.join(file_path, file_name)
            # Module name = JSON file name without extension; basename/splitext
            # is robust to either path separator, unlike split('/').
            type_name = os.path.splitext(os.path.basename(json_path))[0]
            print('----------当前模块:', type_name)
            with open(json_path, 'r', encoding='utf-8') as f:
                con_list = json.load(f)
            for con in con_list:
                url = self.url + con['infostaticurl']
                law_name = con['listtitle']
                html_save_path = os.path.join(html_path, type_name, law_name + '.html')
                self.get_info(url, law_name, html_save_path)

    def get_info(self, url, law_name, html_save_path):
        """Download one page and write it to *html_save_path* as UTF-8.

        Raises ``requests.HTTPError`` on a non-2xx response so error pages
        are never silently saved as law text.
        """
        # timeout prevents the crawl from hanging forever on a stalled socket.
        response = requests.get(url, headers=self.headers, timeout=30)
        response.raise_for_status()
        html = response.content.decode()
        # The per-module subdirectory may not exist yet — create it on demand.
        os.makedirs(os.path.dirname(html_save_path), exist_ok=True)
        with open(html_save_path, 'w', encoding='utf-8') as f:
            f.write(html)
        print(law_name, '下载完毕!')


if __name__ == '__main__':
    # Script entry point: crawl every JSON index and save the HTML pages.
    Spider().json_read()
