#!/usr/bin/env python
# encoding: utf-8
"""
#-------------------------------------------------------------------#
#                   CONFIDENTIAL --- CUSTOM STUDIOS                 #     
#-------------------------------------------------------------------#
#                                                                   #
#                   @Project Name : Globallawonline                #
#                                                                   #
#                   @File Name    : phi_judgov_admord.py                      #
#                                                                   #
#                   @Programmer   : 李建                            #
#                                                                   #  
#                   @Start Date   : 2021/1/12 15:45                 #
#                                                                   #
#                   @Last Update  : 2021/1/12 15:45                 #
#                                                                   #
#-------------------------------------------------------------------#
# Classes: 菲律宾最高法院图书馆Administrative Orders数据采集                                                         #
#                                                                   #
#-------------------------------------------------------------------#
"""
import hashlib

import scrapy
from ..items import MyFileItem
import re


class Spider(scrapy.Spider):
    """Scrape the Administrative Orders shelf of the Philippine Supreme
    Court E-Library (elibrary.judiciary.gov.ph).

    Crawl flow:
        parse        -> follow each month link in the date sidebar
        mothparse    -> per month, extract order number/date/title and
                        follow the order's detail page
        detaiparse   -> follow the link to the full-text page
        detailed     -> emit one populated ``MyFileItem``
    """

    name = 'phi_judgov_admord'
    # BUG FIX: allowed_domains must contain hostnames only.  The original
    # value included a path ('…/thebookshelf/'), which Scrapy's offsite
    # middleware rejects (it warns, and on older versions drops every
    # request for the domain).
    allowed_domains = ['elibrary.judiciary.gov.ph']
    # Entry point: the Administrative Orders bookshelf (category 6).
    start_urls = ['https://elibrary.judiciary.gov.ph/thebookshelf/6']
    # NOTE(review): unused and shadows the builtin ``id``; kept only for
    # backward compatibility with any external reference.
    id = 0

    def parse(self, response):
        """Follow every month link listed in the date sidebar."""
        for link in response.xpath('//*[@id="container_date"]/a'):
            # Skip anchors without an href; follow the selector directly.
            if link.xpath('./@href'):
                yield response.follow(link, callback=self.mothparse,
                                      dont_filter=True)

    def mothparse(self, response):
        """Parse one month page: pull the order number, effective date and
        title for each entry, then follow its detail link with those values
        carried in ``meta``."""
        for row in response.xpath('//*[@id="container_title"]/ul/li'):
            # Regulation number, e.g. "Administrative Order No. 1".
            law_no = (row.xpath('./a/strong/text()').get() or '').strip()
            # Effective date: join all bare text nodes under the anchor,
            # then drop any leading spaces (mirrors the original cleanup).
            c_date = ' '.join(
                t.get().strip() for t in row.xpath('./a/text()')
            ).lstrip(' ')
            # Title of the order, assembled the same way.
            title = ' '.join(
                t.get().strip() for t in row.xpath('./a/small/text()')
            ).lstrip(' ')
            href = row.xpath('./a/@href')
            if href:
                yield response.follow(
                    href.get().strip(),
                    callback=self.detaiparse,
                    dont_filter=True,
                    meta={'LawNo': law_no, 'cDate': c_date, 'Name': title},
                )

    def detaiparse(self, response):
        """Intermediate page: follow the link to the printable full text,
        forwarding the metadata collected by ``mothparse``."""
        href = response.xpath('//*[@id="left"]/div/div[1]/a/@href')
        if href:
            yield response.follow(href.get().strip(), callback=self.detailed,
                                  dont_filter=True, meta=response.meta)

    def detailed(self, response):
        """Build and yield the ``MyFileItem`` for one administrative order.

        All metadata (``LawNo``, ``Name``, ``cDate``) arrives via
        ``response.meta``; the full-text URL is ``response.url``.
        """
        if response.status != 200:
            return

        item = MyFileItem()

        # Full-text page URL; also keys the document's stable identifiers.
        detail_url = response.url
        law_no = response.meta['LawNo']
        name = response.meta['Name']
        c_date = response.meta['cDate']

        item['detailUrl'] = ''
        item['downloadUrl'] = ''
        # NOTE(review): Scrapy's FilesPipeline expects a list here; kept as
        # '' to preserve existing pipeline behavior -- confirm downstream.
        item['file_urls'] = ''

        item['country'] = 'Philippines'
        item['website'] = 'judiciary'
        item['modular'] = 'AdministrativeOrders'
        # Download file format.
        item['ext'] = 'pdf'

        # Title: double quotes replaced by single quotes for safe storage.
        title = name.replace('"', "'")
        item['title'] = title

        # Deterministic file name derived from the URL's MD5 digest.
        fina = ''
        if detail_url:
            fina = 'f' + hashlib.md5(detail_url.encode('utf-8')).hexdigest()
        item['fina'] = fina

        item['chapNo'] = ''

        # Original content is fetched later; only the URL is recorded here.
        item['htmls'] = ''
        item['htmlUrl'] = detail_url

        item['abstract'] = ''
        item['abstractUrl'] = ''

        # Unique record ID (MD5 of the full-text URL).
        systemid = hashlib.md5(detail_url.encode('utf-8')).hexdigest()

        item['LegalName'] = title
        item['Organizaation'] = ''
        item['PublishDate'] = ''
        item['EffectiveDate'] = c_date
        item['SortA'] = 'LAWCOUNTRYFLB'
        item['SortB'] = 'LANGUAGEYY'
        item['SortC'] = ''
        item['SortD'] = ''
        item['SORTE'] = 'EFFECTIVELEVELXZFG'
        item['SORTF'] = ''
        item['Keyword'] = ''
        item['SORTG'] = ''
        item['ChapNo'] = law_no
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = fina
        item['FileUrl'] = ''
        item['DownLoadUrl'] = detail_url
        item['DownLoadWebNameC'] = '菲律宾最高法院电子图书馆'
        item['DownLoadWebNameE'] = 'Supreme Court E-Library'
        item['SYSID'] = systemid
        item['Website'] = 'Administrative Orders'
        item['Isconversion'] = '1'
        item['Revisionmark'] = ''

        yield item
