# -*- coding: utf-8 -*-
# @Time    : 2019/1/30 11:41
# @Author  : yueconger
# @File    : law_spider.py
import requests
from chinacourt_law import generate_random_str as random_str
from lxml import etree
import json
import time
import os
import re


class SpiderLaw(object):
    """Spider for law texts on chinacourt.org.

    Workflow:
      1. ``get_page()`` downloads listing pages and every linked law detail
         page, saving the raw HTML under random file names.
      2. ``process_law_name()`` re-saves each detail page under the statute
         title extracted from its HTML.

    NOTE(review): target directories are hard-coded Windows paths
    (``E:\\LocalServer\\chinacourt\\...``) — they must exist before running.
    """

    def __init__(self):
        # Listing-page URL template; ``{}`` is the 1-based page number.
        self.page_url = 'https://www.chinacourt.org/law/more/law_type_id/MzAwNEAFAA%3D%3D/page/{}.shtml'
        self.origin_url = 'https://www.chinacourt.org'
        # Browser-like headers; ``Referer`` is rewritten per listing page
        # inside get_page() so detail requests look organic.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Host': 'www.chinacourt.org',
            'Referer': 'https://www.chinacourt.org/law/more/law_type_id/MzAwNEAFAA%3D%3D/page/35.shtml',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
        }

    def get_page(self, total_pages=4):
        """Download listing pages ``total_pages`` down to 1 and every law
        detail page they link to.

        Each listing page is saved as ``<page_no>.html``; each detail page is
        saved under a random name (``random_str.generate_random_str()``).

        :param total_pages: number of listing pages to crawl, walked in
            descending order (matches the original hard-coded 4..1 order).
        """
        html_path = r'E:\LocalServer\chinacourt\page'
        detail_path = r'E:\LocalServer\chinacourt\html'
        # Walk pages high-to-low, exactly like the original ``4 - j`` loop.
        for page_no in range(total_pages, 0, -1):
            url = self.page_url.format(page_no)
            # timeout prevents the spider from hanging forever on a dead link
            response = requests.get(url, headers=self.headers, timeout=30)
            file_path = os.path.join(html_path, '%s.html' % page_no)
            print(file_path)
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(response.text)
            print('当前第%s页保存完成!' % page_no)

            html = etree.HTML(response.text)
            li_list = html.xpath('//div[@class="law_left"]/div[@class="law_list"]/ul/li')
            print(len(li_list))
            for li in li_list:
                rand_name = random_str.generate_random_str()
                print('当前处理第%s页' % page_no)
                href = li.xpath('./span/a/@href')[0]
                print(href)
                # Make each detail request carry the listing page as Referer.
                self.headers['Referer'] = url
                detail_url = self.origin_url + href
                res = requests.get(detail_url, headers=self.headers, timeout=30)
                detail_law_path = os.path.join(detail_path, rand_name + '.html')
                with open(detail_law_path, 'w', encoding='utf-8') as f:
                    f.write(res.text)
                print('%s 保存完成!' % rand_name)
                time.sleep(0.5)  # throttle: be polite to the server

    def get_law(self):
        # Placeholder — not implemented yet.
        pass

    def _extract_title(self, content, default):
        """Return the statute title found in *content*, or *default*.

        Tries three markup locations in priority order (``<strong>``,
        ``<font class='MTitle'>``, ``<div class = "content_text">``) and
        returns the first CJK-leading match, whitespace-stripped.
        """
        patterns = (
            '<strong>.*?([\u4e00-\u9fa5].*?)<',
            "<font class='MTitle'>.*?([\u4e00-\u9fa5].*?)<",
            '<div class = "content_text">.*?([\u4e00-\u9fa5].*?)<',
        )
        for pattern in patterns:
            matches = re.findall(pattern, content)
            if matches:
                # BUGFIX: the original kept the first pattern's match
                # unstripped while stripping the fallbacks; strip uniformly.
                return matches[0].strip()
        return default

    def process_law_name(self):
        """Re-save every downloaded detail page under its statute title.

        Reads each file in the ``html`` directory, extracts the title via
        ``_extract_title`` (falling back to the original random file name),
        and writes the content into ``html_new/<title>.html``.
        """
        file_path = r'E:\LocalServer\chinacourt\html'
        file_path_new = r'E:\LocalServer\chinacourt\html_new'
        for file_name in os.listdir(file_path):
            html_path = os.path.join(file_path, file_name)
            with open(html_path, 'r', encoding='utf-8') as f:
                content = f.read()
            old_name = os.path.splitext(file_name)[0]
            name = self._extract_title(content, old_name)
            print('原标题:', old_name)
            print('新标题:', name)
            name_file = name + '.html'
            # Drop HTML entities and characters illegal in Windows file
            # names — otherwise open() below raises OSError.
            name_file = re.sub('&nbsp;', '', name_file)
            name_file = re.sub(r'[\\/:*?"<>|\r\n]', '', name_file)
            html_path_new = os.path.join(file_path_new, name_file)
            with open(html_path_new, 'w', encoding='utf-8') as f:
                f.write(content)
            print(name, '写入完成!')


if __name__ == '__main__':
    # Entry point: rename the already-downloaded detail pages.
    # Uncomment get_page() to (re-)crawl the site first.
    law_spider = SpiderLaw()
    # law_spider.get_page()
    law_spider.process_law_name()
