import hashlib

import pymysql
import scrapy
from ..items import MyFileItem
import re


class Spider(scrapy.Spider):
    """Crawl tax ("pajak") rulings from the Indonesian Supreme Court
    decision-publication site and yield one ``MyFileItem`` per ruling
    (2013 onwards) that has a downloadable PDF.
    """
    name = 'ind_putusan_pajak'
    # allowed_domains entries must be bare domain names (no scheme, no
    # trailing slash).  The site is served from the putusan3.* subdomain,
    # which the previous entry 'putusan.mahkamahagung.go.id/' did not
    # cover, so allow the parent domain instead.
    allowed_domains = ['mahkamahagung.go.id']
    # First page of the "pajak" (tax) category listing.
    start_urls = ['https://putusan3.mahkamahagung.go.id/direktori/index/kategori/pajak-2.html']
    id = 0  # kept for backward compatibility; not read by the spider itself

    @staticmethod
    def _join_text(selectors):
        """Join the stripped text of a SelectorList with single spaces,
        skipping empty fragments, and return the cleaned string."""
        return ' '.join(t for t in (s.get().strip() for s in selectors) if t)

    @staticmethod
    def _escape_sql(text):
        """Escape quotes/backslashes for SQL insertion.

        ``pymysql.escape_string`` was removed from the top-level namespace
        in PyMySQL 1.0; fall back to ``pymysql.converters.escape_string``.
        """
        escape = getattr(pymysql, 'escape_string', None)
        if escape is None:
            escape = pymysql.converters.escape_string
        return escape(text)

    def parse(self, response):
        """Parse one listing page: follow every decision link into
        :meth:`detailed`, then follow the 'Next' pagination link."""
        rows = response.xpath('//*[@id="tabs-1"]/*[@id="popular-post-list-sidebar"]/div')
        for row in rows:
            # Topic classification (third breadcrumb link of the entry).
            sort_c = self._join_text(
                row.xpath('./div[@class="entry-c"]/div[@class="small"]/a[3]/text()'))

            detail_url_sel = row.xpath('./div[@class="entry-c"]/strong/a/@href')
            if not detail_url_sel:
                continue
            title = self._join_text(
                row.xpath('./div[@class="entry-c"]/strong/a//text()'))
            link = row.xpath('./div[@class="entry-c"]/strong/a')[0]
            detail_url = detail_url_sel.get().strip()
            yield response.follow(
                link, callback=self.detailed, dont_filter=True,
                meta={'title': title, 'SortC': sort_c, 'detailUrl': detail_url})

        # Pagination: follow the anchor whose visible text is exactly 'Next'.
        for anchor in response.xpath(
                '//*[@id="tabs-1"]/div[@class="pagging text-center"]/nav/ul/li/a'):
            text = anchor.xpath('./text()').get()
            if text is None or text.strip() != 'Next':
                continue
            next_url = anchor.xpath('./@href').get()
            if next_url:
                yield response.follow(next_url.strip(),
                                      callback=self.parse, dont_filter=True)
                break

    def detailed(self, response):
        """Parse a decision detail page.

        Extracts the metadata table and the PDF download link, then yields
        an item when the ruling is from 2013 onwards and a PDF exists.
        """
        if response.status != 200:
            return

        title = response.meta['title']
        detail_url = response.url
        case_number = adjudication_date = read_date = judg_agency = ''
        abstract = case_summary = download_url = ''

        # Metadata table: first cell holds the field label, second the value.
        for tr in response.xpath('//*[@id="popular-post-list-sidebar"]/ul/table/tbody/tr'):
            label = ''.join(t.get().strip() for t in tr.xpath('./td[1]/text()'))
            value_sel = tr.xpath('./td[2]//text()')
            if label == 'Nomor':                    # case number
                case_number = self._join_text(value_sel)
            elif label == 'Tanggal Register':       # registration (adjudication) date
                adjudication_date = self._join_text(value_sel)
            elif label == 'Tanggal Dibacakan':      # date the ruling was read out
                read_date = self._join_text(value_sel)
            elif label == 'Lembaga Peradilan':      # adjudicating court
                judg_agency = self._join_text(value_sel)
            elif label == 'Catatan Amar':           # verdict notes: keep raw HTML and plain text
                cell = tr.xpath('./td[2]')
                if cell:
                    abstract = cell.get().strip()
                case_summary = self._join_text(value_sel)
                if case_summary:
                    # Escape quotes so multi-quote content survives SQL insertion.
                    case_summary = self._escape_sql(case_summary)

        # Find the PDF download link.  The old check used the regex '.pdf'
        # whose unescaped dot matched any character + 'pdf'; a literal
        # substring test matches the actual intent.
        for link in response.xpath('//*[@id="collapseThree"]/div/div/ul/li/a'):
            text = link.xpath('./text()').get()
            if text and '.pdf' in text:
                href = link.xpath('./@href').get()
                if href:
                    download_url = href.strip()

        # '—' means "not filled in" on the site; fall back to the read date.
        if adjudication_date == '—':
            adjudication_date = read_date if read_date != '—' else ''

        # Only rulings from 2013 onwards are collected (the original comment
        # said the opposite of what the code did; the code keeps year >= 2013).
        year_match = re.search(r'[0-9]{4}', adjudication_date)
        is_recent = not (year_match and int(year_match.group()) < 2013)

        # Unique ID derived from the detail URL; 'f'-prefixed form is the
        # digital file name.
        digest = str(hashlib.md5(detail_url.encode('utf-8')).hexdigest())
        fina = 'f' + digest

        if is_recent and download_url:
            yield self._make_item(
                title=title, case_number=case_number,
                adjudication_date=adjudication_date, read_date=read_date,
                judg_agency=judg_agency, detail_url=detail_url,
                fina=fina, systemid=digest,
                file_urls=[download_url], case_summary=case_summary,
                isconversion='0')
        elif abstract:
            # NOTE(review): the original code assembled this item but its
            # ``yield`` was commented out, so nothing was ever emitted on
            # this path.  Behavior preserved; re-enable deliberately if the
            # abstract-only records should be collected.
            self._make_item(
                title=title, case_number=case_number,
                adjudication_date=adjudication_date, read_date=read_date,
                judg_agency=judg_agency, detail_url=detail_url,
                fina=fina, systemid=digest,
                detail=abstract, isconversion='1')

    def _make_item(self, *, title, case_number, adjudication_date, read_date,
                   judg_agency, detail_url, fina, systemid,
                   file_urls='', detail='', case_summary='', isconversion='0'):
        """Populate a ``MyFileItem`` with the fields shared by both output
        paths of :meth:`detailed`; only the keyword arguments vary."""
        item = MyFileItem()
        item['file_urls'] = file_urls
        item['country'] = 'Indonesia'
        item['website'] = 'putusan'
        item['modular'] = 'Pajak'
        item['ext'] = 'pdf'
        item['fina'] = fina
        item['title'] = ''
        item['abstractUrl'] = ''
        item['abstract'] = ''
        item['dabstractUrl'] = ''
        item['detail'] = detail
        item['detailUrl'] = ''
        item['downloadUrl'] = ''

        item['Title'] = title
        item['CaseNumber'] = case_number
        item['KeyWord'] = ''
        item['SortA'] = 'LAWCOUNTRYYDNXY'
        item['People'] = ''
        item['CaseOfAction'] = ''
        item['UseLaw'] = ''
        item['AdjudicationDate'] = adjudication_date
        item['FullText'] = ''
        item['JudgAgency'] = judg_agency
        item['SortB'] = 'LANGUAGEYNIY'
        item['SortC'] = 'Tax'
        item['CaseSummary'] = case_summary
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = fina
        item['FileUrl'] = ''
        item['AbstractFileName'] = ''
        item['DownLoadUrl'] = detail_url
        item['DownLoadWebNameC'] = '印度尼西亚法院判决书公布网'
        item['DownLoadWebNameE'] = "Indonesian court decision announcement website"
        item['SYSID'] = systemid
        item['Website'] = 'Putusan Pajak'
        item['Isconversion'] = isconversion
        item['CaseDate'] = read_date
        return item


