import hashlib
import re

import scrapy
from ..items import MyFileItem


class Spider(scrapy.Spider):
    """Scrape Supreme Court resolutions from the lawphil.net databank.

    Crawl flow: resolutions index page -> per-year pages (some of which are
    themselves month indexes and recurse) -> per-case detail pages, each of
    which is emitted as a ``MyFileItem``.
    """

    name = 'lawphi_supre_scres'
    # BUG FIX: allowed_domains entries must be bare domain names.  The old
    # value 'lawphil.net/sc_res' contained a path, which Scrapy's offsite
    # middleware warns about and ignores, so no offsite filtering applied.
    allowed_domains = ['lawphil.net']
    # First page of the resolutions index.
    start_urls = ['https://lawphil.net/sc_res/sc_res.html']
    id = 0  # NOTE(review): appears unused; kept so the attribute set is unchanged.

    @staticmethod
    def _collect_text(selectors):
        """Join the stripped text of *selectors* with single spaces.

        Leading spaces are dropped, which reproduces exactly the old
        concatenate-then-``re.sub(r'^ *', '')`` behaviour.
        """
        return ' '.join(s.get().strip() for s in selectors).lstrip(' ')

    def parse(self, response):
        """Follow every year link on the resolutions index page."""
        cells = response.xpath('//td[@colspan="2"]//tr[@bgcolor="#fbedfa"]/td')
        for cell in cells:
            # Cells without a link are decorative; skip them.
            if cell.xpath('./a/@href'):
                year_link = cell.xpath('./a')[0]
                yield response.follow(year_link, callback=self.yearparse,
                                      dont_filter=True)

    def yearparse(self, response):
        """Parse a year page.

        A year page either lists cases directly (rows with valign="top")
        or is a second-level month index; month pages are fed back into
        this same callback.
        """
        rows = response.xpath('//*[@colspan="2"]//*/tr[@valign="top"]')
        if not rows:
            # Month-index layout: recurse into each month page.
            cells = response.xpath('//*[@colspan="2"]//*/tr[@bgcolor="#fbedfa"]/td')
            for cell in cells:
                if cell.xpath('./a/@href'):
                    month_link = cell.xpath('./a')[0]
                    yield response.follow(month_link, callback=self.yearparse,
                                          dont_filter=True)
        for row in rows:
            # Case code (docket number) from the first cell's link text.
            code_sel = row.xpath('./td[1]/a/text()')
            casecode = code_sel.get().strip() if code_sel else ''
            # Detail-page URL from the third cell's link.
            detail_sel = row.xpath('./td[3]/a/@href')
            detail_url = ''
            detail_link = None
            if detail_sel:
                detail_link = row.xpath('./td[3]/a')[0]
                # BUG FIX: the old code concatenated 'https://' +
                # allowed_domains[0] (which included a path) with the raw
                # href, yielding malformed URLs.  urljoin resolves the href
                # against the current page correctly.
                detail_url = response.urljoin(detail_sel.get().strip())
            # Judgement date: the loose text nodes of the first cell.
            date_sel = row.xpath('./td[1]/text()')
            judgementdate = self._collect_text(date_sel) if date_sel else ''
            # Case title; normalise double quotes to single quotes.
            title_sel = row.xpath('./td[2]//text()')
            title = ''
            if title_sel:
                title = self._collect_text(title_sel).replace('"', "'")

            if detail_url:
                yield response.follow(detail_link, callback=self.detailed,
                                      dont_filter=True,
                                      meta={'casecode': casecode,
                                            'detailUrl': detail_url,
                                            'judgementdate': judgementdate,
                                            'title': title})

    def detailed(self, response):
        """Build a ``MyFileItem`` from a case detail page.

        All descriptive fields travel in ``response.meta`` from
        :meth:`yearparse`; the detail page itself contributes only its URL.
        (The unused ``extract_with_css`` helper of the old version has been
        removed — it was never called.)
        """
        if response.status != 200:
            return
        item = MyFileItem()

        # Source bookkeeping.
        country = 'Philippines'
        website = 'lawphil'
        modular = 'suprescres'
        # Quote normalisation was already done upstream; repeating it here
        # is idempotent and preserves the original behaviour.
        title = response.meta['title'].replace('"', "'")
        casecode = response.meta['casecode']
        judgementdate = response.meta['judgementdate']
        # The page's own URL doubles as detail and download URL.
        detail_url = response.url
        download_url = detail_url

        # One MD5 of the URL serves as both the file-name stem and the
        # unique system ID (hexdigest() already returns a str).
        digest = hashlib.md5(detail_url.encode('utf-8')).hexdigest()
        fina = 'f' + digest if detail_url else ''
        systemid = digest

        item['file_urls'] = ''
        item['country'] = country
        item['website'] = website
        item['modular'] = modular
        item['ext'] = 'pdf'
        item['fina'] = fina
        item['title'] = title
        item['abstractUrl'] = ''
        item['abstract'] = ''
        item['dabstractUrl'] = ''
        item['detail'] = ''
        item['detailUrl'] = ''
        item['downloadUrl'] = download_url

        item['Title'] = title
        item['CaseNumber'] = casecode
        item['KeyWord'] = ''
        item['SortA'] = 'LAWCOUNTRYFLB'
        item['People'] = ''
        item['CaseOfAction'] = ''
        item['UseLaw'] = ''
        item['AdjudicationDate'] = judgementdate
        item['FullText'] = ''
        item['JudgAgency'] = ''
        item['SortB'] = 'LANGUAGEYY'
        item['SortC'] = ''
        item['CaseSummary'] = ''
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = fina
        item['FileUrl'] = ''
        item['AbstractFileName'] = ''
        item['DownLoadUrl'] = detail_url
        item['DownLoadWebNameC'] = '菲律宾法律和法律体系数据库'
        item['DownLoadWebNameE'] = "PHILIPPINE LAWS AND JURISPRUDENCE DATABANK"
        item['SYSID'] = systemid
        item['Website'] = 'Supreme Resolutions'
        item['Isconversion'] = '0'
        item['CaseDate'] = ''

        yield item
