import hashlib

import scrapy
from ..items import MyFileItem
import re
import json


class Spider(scrapy.Spider):
    """Crawl executive issuances from the LAWPHiL Project
    (Philippine Laws and Jurisprudence Databank).

    Crawl flow:
        start page (parse) -> module pages (modelparse)
        -> per-year document tables (yearparse)
        -> individual document pages (detailed), each yielding one
        ``MyFileItem``.
    """

    name = 'lawphi_exec'
    # Bare domain only: Scrapy's offsite filtering expects domain names,
    # not URL paths ('lawphil.net/executive/' raises a warning and can
    # cause legitimate requests to be dropped).
    allowed_domains = ['lawphil.net']
    # Entry page of the executive section of the Philippine legal databank.
    start_urls = ['https://lawphil.net/executive/executive.html']
    # NOTE(review): unused counter kept for backward compatibility; it also
    # shadows the builtin id() inside the class body — candidate for removal.
    id = 0

    def parse(self, response):
        """Parse the start page and follow every module link."""
        for li in response.xpath('//tr[@valign="top"]/*//ul[@type="square"]/li'):
            # Follow the anchor whenever it carries an href attribute;
            # response.follow resolves the relative URL for us.
            if li.xpath('./a/@href').get() is not None:
                yield response.follow(li.xpath('./a')[0],
                                      callback=self.modelparse,
                                      dont_filter=True)

    def modelparse(self, response):
        """Parse a module page: follow each per-year link, or hand the
        page straight to ``yearparse`` when it has no year index."""
        year_cells = response.xpath(
            '//*[@colspan="2"]//*/tr[@bgcolor="#fbedfa"]/td')
        if not year_cells:
            # No year table on this module: the page itself already lists
            # the documents, so re-request it with the year-page callback.
            yield response.follow(response.url,
                                  callback=self.yearparse,
                                  dont_filter=True)
        else:
            for cell in year_cells:
                if cell.xpath('./a/@href').get() is not None:
                    yield response.follow(cell.xpath('./a')[0],
                                          callback=self.yearparse,
                                          dont_filter=True)

    @staticmethod
    def _join_text(selectors):
        """Join stripped text nodes with single spaces and drop leading
        spaces (mirrors the original concatenate-then-strip loop)."""
        return ' '.join(s.get().strip() for s in selectors).lstrip(' ')

    def yearparse(self, response):
        """Parse one year's document table and request each document page,
        forwarding the table metadata via ``meta``."""
        for row in response.xpath('//*[@colspan="2"]//*/tr[@valign="top"]'):
            # Law / regulation number (first cell's link text).
            law_no = (row.xpath('./td[1]/a/text()').get() or '').strip()
            href = row.xpath('./td[1]/a/@href').get()
            # Effective date: loose text nodes of the first cell.
            c_date = self._join_text(row.xpath('./td[1]/text()'))
            # Title / abstract: all text in the second cell.
            name = self._join_text(row.xpath('./td[2]//text()'))
            if href is not None:
                # urljoin resolves the relative href correctly against the
                # current page (the old manual concatenation could build a
                # wrong URL; the value is informational — ``detailed`` uses
                # response.url as the canonical detail URL).
                yield response.follow(
                    row.xpath('./td[1]/a')[0],
                    callback=self.detailed,
                    dont_filter=True,
                    meta={'LawNo': law_no,
                          'detailUrl': response.urljoin(href.strip()),
                          'cDate': c_date,
                          'Name': name})

    def detailed(self, response):
        """Build and yield a ``MyFileItem`` from a single document page.

        Non-200 responses are skipped silently, as before.
        """
        if response.status != 200:
            return

        item = MyFileItem()

        # Canonical detail URL is the page actually fetched.
        detail_url = response.url
        item['detailUrl'] = detail_url
        item['downloadUrl'] = ''
        item['file_urls'] = ''

        # Metadata forwarded from the year table.
        law_no = response.meta['LawNo']
        c_date = response.meta['cDate']
        abstract = response.meta['Name']

        item['country'] = 'Philippines'
        item['website'] = 'lawphil'
        item['modular'] = 'execu'
        # Target download format.
        item['ext'] = 'pdf'

        # Stable identifiers derived from the detail URL; compute the
        # digest once and reuse it for both the file name and the SYSID.
        digest = hashlib.md5(detail_url.encode('utf-8')).hexdigest()
        fina = 'f' + digest if detail_url else ''
        item['fina'] = fina

        item['chapNo'] = ''

        # Title: normalize double quotes to single quotes.
        title = response.meta['Name'].replace('"', "'")
        item['title'] = title

        # Original document body (first <blockquote> of the page).
        blockquotes = response.xpath('//blockquote')
        item['htmls'] = blockquotes.get().strip() if blockquotes else ''
        item['htmlUrl'] = ''

        item['abstract'] = abstract
        item['abstractUrl'] = ''

        item['LegalName'] = title
        # NOTE(review): 'Organizaation' is the field name declared in
        # items.py — the typo must stay until the item class changes.
        item['Organizaation'] = ''
        item['PublishDate'] = ''
        item['EffectiveDate'] = c_date
        item['SortA'] = 'LAWCOUNTRYFLB'
        item['SortB'] = 'LANGUAGEYY'
        item['SortC'] = ''
        item['SortD'] = ''
        item['SORTE'] = 'EFFECTIVELEVELXZFG'
        item['SORTF'] = ''
        item['Keyword'] = ''
        item['SORTG'] = ''
        item['ChapNo'] = law_no
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = fina
        item['FileUrl'] = ''
        item['DownLoadUrl'] = detail_url
        item['DownLoadWebNameC'] = '菲律宾法律和法律体系数据库'
        item['DownLoadWebNameE'] = 'PHILIPPINE LAWS AND JURISPRUDENCE DATABANK'
        item['SYSID'] = digest
        item['Website'] = 'Executive Orders'
        item['Isconversion'] = '1'
        item['Revisionmark'] = ''

        yield item
