import hashlib

import scrapy
from ..items import MyFileItem
import re


class ThaCourt(scrapy.Spider):
    """Spider for the Constitutional Court of Thailand (constitutionalcourt.or.th).

    Crawls the case listing (cid=34): the start page lists years, each year
    page lists modules, and each module page lists individual rulings with
    pagination.  One ``MyFileItem`` is yielded per ruling detail page.
    """

    name = 'tha_conscourt'
    # BUG FIX: Scrapy's allowed_domains takes bare domain names, not URLs with
    # a path; the old value ('constitutionalcourt.or.th/th/occ_web/') triggers
    # an offsite-middleware warning and can misfilter requests.  The old value
    # was only ever used to build URL strings that were never requested.
    allowed_domains = ['constitutionalcourt.or.th']
    # Thai Constitutional Court case listing.
    start_urls = ['http://www.constitutionalcourt.or.th/th/occ_web/more_news.php?cid=34']
    # Running counter of scraped items (name kept for backward compatibility,
    # even though it shadows the builtin and is never stored on the item).
    id = 0

    def parse(self, response):
        """Parse the start page and follow every year link in the list group."""
        for row in response.xpath('//ul[@class="list-group"]/li'):
            # Only follow rows that actually carry a link.
            if row.xpath('./div/a/@href'):
                yield response.follow(row.xpath('./div/a')[0],
                                      callback=self.yearparse, dont_filter=True)

    def yearparse(self, response):
        """Parse a year page: collect the parties' names and follow each module link."""
        for row in response.xpath('//ul[@class="list-group"]/li'):
            # Parties involved; the text may be split over several nodes.
            name_nodes = row.xpath(
                './div/a/div[@class="media-body"]/div[@class="list-folder"]/text()')
            people = ' '.join(node.get().strip() for node in name_nodes)
            if row.xpath('./div/a/@href'):
                yield response.follow(row.xpath('./div/a')[0],
                                      callback=self.mothparse, dont_filter=True,
                                      meta={'people': people})

    def mothparse(self, response):
        """Parse one listing page: follow each ruling's detail link, then paginate."""
        people = response.meta['people']
        for row in response.xpath('//ul[@class="list-group"]/li'):
            if not row.xpath('./div/a/@href'):
                continue  # row without a detail link
            # Title may be split over several text nodes.
            title = ' '.join(t.get().strip()
                             for t in row.xpath('./div/a/div[@class="media-body"]//text()'))
            # The adjudication date is embedded in the title string.
            yield response.follow(row.xpath('./div/a')[0], callback=self.detailed,
                                  dont_filter=True,
                                  meta={'people': people, 'title': title,
                                        'adjudicationDate': self._extract_date(title)})

        # Pagination: follow the page numbered one past the current one, if any.
        page_items = response.xpath('//ul[@class="pagination"]/li')
        if len(page_items) > 1:
            current_text = response.xpath(
                '//ul[@class="pagination"]/li[@class="page-item active"]/a/text()').get()
            try:
                # BUG FIX: the original called int('') and died with an
                # uncaught ValueError when no active page element was found.
                current_page = int((current_text or '').strip())
            except ValueError:
                return
            for li in page_items:
                label = li.xpath('./a/text()').get()
                if label is None:
                    continue
                try:
                    page_no = int(label.strip())
                except ValueError:
                    continue  # non-numeric labels such as prev/next arrows
                if page_no == current_page + 1:
                    if li.xpath('./a/@href'):
                        yield response.follow(li.xpath('./a')[0],
                                              callback=self.mothparse, dont_filter=True,
                                              meta={'people': people})
                    break

    @staticmethod
    def _extract_date(title):
        """Return the first d/m/yyyy date found in *title*, or '' if none.

        Years above 2500 are taken to be Buddhist-era and converted to the
        Common Era by subtracting 543.
        """
        match = re.search(r'[0-9]{1,2}/[0-9]{1,2}/[0-9]{4}', title)
        if match is None:
            return ''
        date = match.group(0)
        year = int(re.search(r'[0-9]{4}', date).group(0))
        if year > 2500:  # Buddhist-era year -> Common Era
            year -= 543
        return re.sub(r'[0-9]{4}', str(year), date)

    def detailed(self, response):
        """Build and yield one ``MyFileItem`` for a ruling detail page."""
        if response.status != 200:
            return
        detail_url = response.url
        if not detail_url:
            return

        people = response.meta['people']
        # Normalise double quotes to single quotes in the title.
        title = response.meta['title'].replace('"', "'")
        adjudication_date = response.meta['adjudicationDate']

        # One MD5 digest serves as both the download file name and the system id.
        digest = hashlib.md5(detail_url.encode('utf-8')).hexdigest()
        self.id += 1  # keep the running item counter

        item = MyFileItem()
        item['file_urls'] = ''
        item['country'] = 'Thailand'
        item['website'] = 'Conscourt'
        item['modular'] = 'Case'
        item['ext'] = 'pdf'  # download file format
        item['fina'] = 'f' + digest
        item['title'] = ''
        item['abstractUrl'] = ''
        item['abstract'] = ''
        item['dabstractUrl'] = ''
        item['detail'] = ''
        item['detailUrl'] = ''
        item['downloadUrl'] = detail_url

        item['Title'] = title
        item['CaseNumber'] = ''
        item['KeyWord'] = ''
        item['SortA'] = 'LAWCOUNTRYTG'
        item['People'] = people
        item['CaseOfAction'] = ''
        item['UseLaw'] = ''
        item['AdjudicationDate'] = adjudication_date
        item['FullText'] = ''
        item['JudgAgency'] = ''
        item['SortB'] = 'LANGUAGETY'
        item['SortC'] = ''
        item['CaseSummary'] = ''
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = 'f' + digest
        item['FileUrl'] = ''
        item['AbstractFileName'] = ''
        item['DownLoadUrl'] = detail_url
        item['DownLoadWebNameC'] = '泰国立宪法院'
        item['DownLoadWebNameE'] = "Thai Constitutional Court"
        item['SYSID'] = digest
        item['Website'] = 'Constitutional Court'
        item['Isconversion'] = '0'
        item['CaseDate'] = ''

        yield item




