import hashlib

import scrapy
from ..items import MyFileItem
import re
import json


class SsoBillsSupplement(scrapy.Spider):
    """Spider for the "Bills Supplement" listing on Singapore Statutes Online.

    Walks the paginated listing (newest year first), follows each bill's
    detail page and yields one ``MyFileItem`` per bill, carrying either a
    PDF download URL or — when no PDF is published — the lazy-load HTML
    fragment URL of the bill text.
    """

    name = 'sso_bil_sup'
    allowed_domains = ['sso.agc.gov.sg']
    # First page of the bills-supplement listing.
    start_urls = [
        'https://sso.agc.gov.sg/Browse/Bills-Supp/Published/All?SortBy=Year&SortOrder=DESC',
    ]

    def _abs(self, path):
        """Turn a site-relative href into an absolute https URL."""
        return 'https://' + self.allowed_domains[0] + path.strip()

    @staticmethod
    def _first_text(selector_list):
        """Stripped text of the first match in *selector_list*, or ''."""
        if len(selector_list) > 0:
            return selector_list.get().strip()
        return ''

    def parse(self, response):
        """Parse one listing page.

        Emits a detail-page request for every table row, forwarding the
        row's metadata via ``meta``, then follows the bottom pager links
        (Scrapy's dupe filter prevents re-crawling visited pages).
        """
        for row in response.xpath('//*[@id="listPanel"]/table/tbody/tr'):
            year = self._first_text(row.xpath('./td[1]/text()'))

            detail_href = row.xpath('./td[2]/a/@href')
            detail_url = self._abs(detail_href.get()) if len(detail_href) > 0 else ''

            anchor = row.xpath('./td[2]/a')[0]
            title = anchor.css('::text').get().strip()

            # Bill / chapter number (may be blank for some rows).
            chap_no = self._first_text(row.xpath('./td[3]/text()'))

            download_href = row.xpath('./td[4]/a/@href')
            download_url = self._abs(download_href.get()) if len(download_href) > 0 else ''

            yield response.follow(
                anchor, self.detailed,
                meta={'detailUrl': detail_url, 'title': title,
                      'chapNo': chap_no, 'downloadUrl': download_url,
                      'year': year})

        # Pagination.
        for pager_link in response.css('#listPanel .pager-bottom a'):
            yield response.follow(pager_link, callback=self.parse)

    def _build_html_url(self, response):
        """Build the lazy-load URL for the bill's HTML text, or return ''.

        The detail page embeds a JSON blob ({"tocSysId" ...}) holding the
        fragment identifiers needed to request the content. Previously a
        missing blob raised IndexError and lost the whole item even when a
        PDF download existed; now any missing/malformed blob yields ''.
        """
        matches = re.findall(r'''{"tocSysId"[\s\S]*}''', response.text, re.S)
        if not matches:
            return ''
        try:
            meta = json.loads(matches[0])
            # First entry of the 'fragments' mapping: (series_id, fragment).
            series_id, fragment = list(meta['fragments'].items())[0]
            return (self._abs(meta['lazyLoadContentUrl'])
                    + '?TocSysId=' + meta['tocSysId']
                    + '&SeriesId=' + series_id
                    + '&V=29'
                    + '&FragSysId=' + fragment['Item1']
                    + '&_=' + fragment['Item2'])
        except (ValueError, KeyError, IndexError, TypeError):
            # Malformed blob — fall back to "no HTML URL available".
            return ''

    def detailed(self, response):
        """Parse one bill's detail page into a ``MyFileItem`` and yield it."""
        if response.status != 200:
            return

        detail_url = response.meta['detailUrl']
        download_url = response.meta['downloadUrl']

        item = MyFileItem()
        item['detailUrl'] = detail_url
        item['downloadUrl'] = ''
        # Consumed by the files pipeline to fetch the PDF.
        item['file_urls'] = [download_url]
        # '0' = PDF as-is, '1' = HTML that needs conversion downstream.
        item['Isconversion'] = '0'

        item['country'] = 'Singapore'
        item['website'] = 'sso'
        item['modular'] = 'bilsup'
        item['title'] = response.meta['title']
        item['ext'] = 'pdf'

        # Stored file name: md5 of the PDF URL when a PDF exists.
        fina = ''
        if download_url:
            fina = 'f' + hashlib.md5(download_url.encode('utf-8')).hexdigest()
        item['fina'] = fina

        item['chapNo'] = response.meta['chapNo']

        # No PDF published: fall back to the lazy-load HTML fragment URL.
        html_url = self._build_html_url(response)
        item['htmlUrl'] = ''
        if not download_url and html_url:
            fina = 'f' + hashlib.md5(html_url.encode('utf-8')).hexdigest()
            item['fina'] = fina
            item['htmlUrl'] = html_url
            item['Isconversion'] = '1'

        item['htmls'] = ''
        item['abstract'] = ''
        item['abstractUrl'] = ''

        # Publication date: second text node of the status block.
        # (The first node looks like an address/gazette line but was never
        # stored by the original code, so it is not extracted here.)
        publish_date = self._first_text(response.xpath(
            '//*[@id="topLeftPanel"]/*//div[@class="status-value"]/text()[2]'))

        # Commencement date: all .cDateL fragments joined with spaces.
        c_parts = [node.get().strip() for node in
                   response.xpath('//*[@id="legis"]/*//*[@class="cDateL"]/text()')]
        effective_date = ' '.join(c_parts).lstrip(' ')

        # Definition keywords: quoted terms (“...”) from the interpretation
        # table's 'def' cells.
        keyword = ''
        for cell in response.xpath(
                '//*[@id="P1I-"or@id="P11-"or@id="legis"]/div[2]/*//table[2]/*//td[@class="def"]/text()'):
            quoted = re.findall(r'''“[\s\S]*”''', cell.get().strip(), re.S)
            if quoted:
                keyword = keyword + quoted[0] + ' '

        # Unique record id: md5 of the detail-page URL.
        systemid = hashlib.md5(detail_url.encode('utf-8')).hexdigest()

        item['LegalName'] = response.meta['title']
        # Issuing agency is not present on these pages (key spelling matches
        # the item definition).
        item['Organizaation'] = ''
        item['PublishDate'] = publish_date
        item['EffectiveDate'] = effective_date
        item['SortA'] = 'LAWCOUNTRYXJP'
        item['SortB'] = 'LANGUAGEYY'
        item['SortC'] = ''
        item['SortD'] = ''
        item['SORTE'] = ''
        item['SORTF'] = ''
        # Fix: the computed keyword string was previously discarded and ''
        # stored instead.
        item['Keyword'] = keyword
        item['SORTG'] = ''
        item['ChapNo'] = response.meta['chapNo']
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = fina
        item['FileUrl'] = ''
        item['DownLoadUrl'] = detail_url
        item['DownLoadWebNameC'] = '新加坡法律在线'
        item['DownLoadWebNameE'] = 'Singapore Statutes Online'
        item['SYSID'] = systemid
        item['Website'] = 'Bills Supplement'
        item['Revisionmark'] = ''

        yield item
