# -*- coding: utf-8 -*-
# @Time    : 2019/11/5 15:32
# @Author  : Damn7Kx
# @Software: PyCharm
import re
import datetime
import scrapy
from urllib.parse import urljoin
from NewsSpider.items import WenshuItem
from NewsSpider.tools.utils import Utils
from w3lib.html import remove_tags


class BjcourtSpider(scrapy.Spider):
    """Crawl intellectual-property ('zscq') judgment documents from the
    Beijing Court site (bjcourt.gov.cn), one day-long date window at a
    time, and emit a ``WenshuItem`` per judgment detail page.
    """

    name = 'bjcourt2'
    allowed_domains = ['bjcourt.gov.cn']

    custom_settings = {
        'LOG_LEVEL': 'WARNING',
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.RandomUserAgentMiddleware': 543,
            'NewsSpider.middlewares.ProxyIPMiddleware': 543,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.SolrSendPipeline': 543,
        }
    }

    # Listing endpoint; also used as the base URL when resolving the
    # relative detail-page links scraped from each listing page.
    start_url = 'http://www.bjcourt.gov.cn/cpws/index.htm'

    def yield_time(self):
        """Yield consecutive one-day ('YYYY-MM-DD', 'YYYY-MM-DD') windows
        covering judgment dates from 2012-01-01 through 2012-12-01.

        BUGFIX: the original computed the window start as
        ``begin + timedelta(days=i - 1)``, so the first window started on
        2011-12-31 — outside the intended range — and every window lagged
        one day behind its end date.
        """
        begin = datetime.date(2012, 1, 1)
        end = datetime.date(2012, 12, 1)
        for i in range((end - begin).days + 1):
            day = begin + datetime.timedelta(days=i)
            yield str(day), str(day + datetime.timedelta(days=1))

    def start_requests(self):
        """Issue one POST listing query per (date window, case class)."""
        for start_cprq, end_cprq in self.yield_time():
            # Case classes: criminal / civil / administrative / enforcement /
            # compensation. Only '6' is currently crawled.
            # ajlbs = ['1', '2', '6', '7', '8']
            ajlbs = ['6']
            for ajlb in ajlbs:
                data = {
                    "startCprq": start_cprq,
                    "endCprq": end_cprq,
                    # 'ajlb': ajlb,
                    'zscq': '1',  # restrict to intellectual-property cases
                    'st': '1',
                    'page': '1',
                }
                # Echo the query dict back through meta so parse_num can
                # re-issue it page by page.
                yield scrapy.FormRequest(url=self.start_url, callback=self.parse_num,
                                         method='POST', formdata=data, meta=data)

    def parse_num(self, response):
        """Read the total hit count from the first listing page and schedule
        one request per result page (20 results per page)."""
        datas = {
            'startCprq': response.meta['startCprq'],
            'endCprq': response.meta['endCprq'],
            # 'ajlb': response.meta['ajlb'],
            'st': response.meta['st'],
        }
        try:
            # The page embeds the total hit count as JS: ``qnum = NNN;``.
            # (raw string fixes the invalid ``\s``/``\d`` escape warnings)
            re_match = re.search(r'qnum\s*=\s*(\d+);', response.text)
            if re_match:
                page = int(re_match.group(1)) // 20 + 1
                print('此页共有[%s]页数据!' % page)
                for i in range(1, page + 1):
                    datas['page'] = str(i)
                    # dont_filter: every page POSTs to the same URL, so the
                    # dupe filter would otherwise drop all but the first.
                    yield scrapy.FormRequest(url=self.start_url, callback=self.page_list,
                                             method='POST', formdata=datas, dont_filter=True)
            else:
                page = 1
                print('共有[%s]页数据!' % page)
                datas['page'] = '1'
                yield scrapy.FormRequest(url=self.start_url, callback=self.page_list,
                                         method='POST', formdata=datas, dont_filter=True)
        except Exception as e:
            # Boundary handler: keep the spider alive on malformed pages.
            print(e)

    def page_list(self, response):
        """Extract judgment detail links from one listing page and follow them."""
        urls = response.css('div.layer.p5_0 ul li a::attr(href)').extract()
        news_urls = [urljoin(self.start_url, url) for url in urls]
        print("共有%s个url" % len(news_urls))
        for url in news_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Parse one judgment detail page into a ``WenshuItem``."""
        item = WenshuItem()
        title = response.css('div.article_hd h3::text').extract_first()
        court_name = response.css('tr:nth-child(1) td:nth-child(1) input::attr(value)').extract_first()
        case_type = response.css('tr:nth-child(1) td:nth-child(2) input::attr(value)').extract_first()
        case_cause = response.css('tr:nth-child(1) td:nth-child(3) input::attr(value)').extract_first()
        case_number_raw = response.css('tr:nth-child(2) td:nth-child(2) input::attr(value)').extract_first()
        # Normalize ASCII parentheses to full-width ones so the document id
        # (an MD5 of the case number) is stable across page variants; the
        # ``or ''`` guards against a missing <input> (was an AttributeError).
        case_number = (case_number_raw or '').replace('(', '（').replace(')', '）')
        pubdate_raw = response.css('p.p_date::text').extract_first()
        # Renamed from ``id``, which shadowed the builtin.
        doc_id = Utils.md5_encrypt(case_number)
        try:
            # Strip all whitespace, keep the part after the full-width colon,
            # then parse a date like "Jan01,2013".
            pubdate_fmt = ''.join(pubdate_raw.split()).split("：")[1]
            pubdate_time = datetime.datetime.strptime(pubdate_fmt, "%b%d,%Y").strftime("%Y-%m-%d")
        except (AttributeError, IndexError, ValueError):
            # AttributeError: field missing; IndexError: no colon in text;
            # ValueError: unexpected date format. ``.get`` avoids a KeyError
            # inside the handler when no proxy middleware set the key.
            print('代理ip为:', response.meta.get('proxy'))
            pubdate_time = ''
        referee_time_format = response.css('tr:nth-child(2) td:nth-child(3) input::attr(value)').extract()[0]
        referee_time = datetime.datetime.strptime(referee_time_format, "%Y年%m月%d日").strftime('%Y-%m-%d')
        html_escape = response.css("#cc").extract()[0]
        # Newer documents wrap the body in ``WordSection1``; older ones in
        # ``Section1``. If neither matches, ``.group(1)`` still raises, as
        # the original did when its fallback pattern also failed.
        re_match = re.search(r'(<div class=WordSection1.*?</div>)', html_escape, re.S)
        if re_match is None:
            re_match = re.search(r'(<div class=Section1.*?</div>)', html_escape, re.S)
        quote_html = re_match.group(1)
        # NOTE(review): this round-trip presumably undoes JS-style \uXXXX
        # escaping embedded in the HTML — confirm against a sample page.
        html = quote_html.encode('utf-8').decode('unicode_escape')
        item['title'] = title
        item['id'] = doc_id
        item['court_name'] = court_name
        item['case_type'] = case_type
        item['case_cause'] = case_cause
        item['case_number'] = case_number
        item['province'] = "北京市"
        item['litigant'] = ""
        item['pubdate'] = pubdate_time
        item['referee_time'] = referee_time
        item['html'] = html
        item['content'] = remove_tags(html)
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        yield item
