#!/usr/bin/env python
# encoding: utf-8
"""
#-------------------------------------------------------------------#
#                   CONFIDENTIAL --- CUSTOM STUDIOS                 #     
#-------------------------------------------------------------------#
#                                                                   #
#                   @Project Name : Globallawonline                #
#                                                                   #
#                   @File Name    : phl_judgov_appdec.py                      #
#                                                                   #
#                   @Programmer   : 李建                            #
#                                                                   #  
#                   @Start Date   : 2021/1/12 17:30                 #
#                                                                   #
#                   @Last Update  : 2021/1/12 17:30                 #
#                                                                   #
#-------------------------------------------------------------------#
# Classes:该类用于进行菲律宾最高法院电子图书馆的Court of Appeals Decisions模块判例数据采集  #
#                                                                   #
#-------------------------------------------------------------------#
"""
import hashlib

import scrapy
from ..items import MyFileItem
import re


class Spider(scrapy.Spider):
    """Scrape case records from the 'Court of Appeals Decisions' module of the
    Philippine Supreme Court E-Library (elibrary.judiciary.gov.ph).

    Crawl flow:
        parse        -> year links on the landing page
        mothparse    -> per-year case listing (number, date, title)
        detaiparse   -> intermediate page, follows the full-text link
        detailed     -> builds and yields one MyFileItem per decision
    """

    name = 'phl_judgov_appdec'
    # FIX: allowed_domains must hold bare domain names only; the original
    # value included a URL path ('/thebookshelf/'), which Scrapy's
    # OffsiteMiddleware rejects with a warning and cannot filter on.
    allowed_domains = ['elibrary.judiciary.gov.ph']
    # Landing page of the Court of Appeals Decisions module.
    start_urls = ['https://elibrary.judiciary.gov.ph/thebookshelf/21']
    # NOTE(review): unused attribute that shadows the builtin ``id``;
    # kept only for backward compatibility — candidate for removal.
    id = 0

    def parse(self, response):
        """Parse the landing page and follow every year link."""
        for year_link in response.xpath('//*[@id="container_date"]/a'):
            # Only follow anchors that actually carry an href.
            if year_link.xpath('./@href'):
                yield response.follow(year_link, callback=self.mothparse,
                                      dont_filter=True)

    def mothparse(self, response):
        """Parse a year listing page.

        For each list entry, extract the case number, adjudication date and
        case title, then follow the entry's detail link with those values
        passed along in ``meta``.
        """
        for entry in response.xpath('//*[@id="container_title"]/ul/li'):
            # Case number (e.g. a docket number) from the <strong> element.
            law_no = (entry.xpath('./a/strong/text()').get() or '').strip()
            # Adjudication date: join all bare text nodes of the anchor.
            # ' '.join + lstrip replaces the original quadratic '+' loop
            # and the re.sub(r'^ *', ...) leading-space strip.
            c_date = ' '.join(
                node.get().strip() for node in entry.xpath('./a/text()')
            ).lstrip(' ')
            # Case title: join all <small> text nodes the same way.
            title = ' '.join(
                node.get().strip() for node in entry.xpath('./a/small/text()')
            ).lstrip(' ')
            detail_url = entry.xpath('./a/@href').get()
            if detail_url:
                yield response.follow(
                    detail_url.strip(), callback=self.detaiparse,
                    dont_filter=True,
                    meta={'LawNo': law_no, 'cDate': c_date, 'title': title})

    def detaiparse(self, response):
        """Parse the intermediate page and follow the full-text link,
        forwarding the metadata collected by :meth:`mothparse`."""
        detail_url = response.xpath('//*[@id="left"]/div/div[1]/a/@href').get()
        if detail_url:
            yield response.follow(detail_url.strip(), callback=self.detailed,
                                  dont_filter=True, meta=response.meta)

    def detailed(self, response):
        """Build and yield one MyFileItem for a decision detail page.

        Yields nothing unless the page was fetched with HTTP 200.
        """
        if response.status != 200:
            return
        item = MyFileItem()

        # The decision's canonical URL doubles as download URL and id source.
        detail_url = response.url
        # Metadata carried over from the listing page.
        law_no = response.meta['LawNo']
        c_date = response.meta['cDate']
        # Normalise double quotes in the title to single quotes.
        title = response.meta['title'].replace('"', "'")

        # FIX: compute the md5 of the URL once; the original hashed the
        # same string twice (for the file name and for the system id).
        systemid = hashlib.md5(detail_url.encode('utf-8')).hexdigest()
        fina = 'f' + systemid if detail_url else ''

        item['file_urls'] = ''
        item['country'] = 'Philippines'
        item['website'] = 'judiciary'
        item['modular'] = 'CourtofAppealsDecisions'
        # Expected download file format.
        item['ext'] = 'pdf'
        item['fina'] = fina
        item['title'] = ''
        item['abstractUrl'] = ''
        item['abstract'] = ''
        item['dabstractUrl'] = ''
        item['detail'] = ''
        item['detailUrl'] = detail_url
        item['downloadUrl'] = ''

        item['Title'] = title
        item['CaseNumber'] = law_no
        item['KeyWord'] = ''
        item['SortA'] = 'LAWCOUNTRYFLB'
        item['People'] = ''
        item['CaseOfAction'] = ''
        item['UseLaw'] = ''
        item['AdjudicationDate'] = c_date
        item['FullText'] = ''
        item['JudgAgency'] = ''
        item['SortB'] = 'LANGUAGEYY'
        item['SortC'] = ''
        item['CaseSummary'] = ''
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = fina
        item['FileUrl'] = ''
        item['AbstractFileName'] = ''
        item['DownLoadUrl'] = detail_url
        item['DownLoadWebNameC'] = '菲律宾最高法院电子图书馆'
        item['DownLoadWebNameE'] = "Supreme Court E-Library"
        item['SYSID'] = systemid
        item['Website'] = 'Court of Appeals Decisions'
        item['Isconversion'] = '1'
        item['CaseDate'] = ''

        yield item

