import hashlib

import scrapy
from ..items import MyFileItem
import re
import json


class SsoSup(scrapy.Spider):
    """Spider for the Brunei Attorney General's Chambers "Text of Orders" page.

    Crawls the listing table on the start page, follows every anchor whose
    target looks like a PDF, and yields one ``MyFileItem`` per order document.
    """

    name = 'brn_agc_order'
    allowed_domains = ['agc.gov.bn']
    # Listing page of the Brunei Attorney General's Chambers orders.
    start_urls = ['http://www.agc.gov.bn/AGC%20Site%20Pages/Text%20of%20Orders.aspx']
    # NOTE(review): shadows the builtin ``id`` and is never read anywhere in
    # this file; kept only for backward compatibility with possible外部 users.
    id = 0

    def parse(self, response):
        """Parse the listing page and schedule a `detailed` request per PDF link.

        Each ``<tr>`` of ``#table1`` holds the title/anchor cell (td[1]) and
        the law-number cell (td[2]); title and law number are forwarded to the
        detail callback via ``meta``.
        """
        for row in response.xpath('//*[@id="table1"]/tbody/tr'):
            # Law number: concatenate the text fragments of the second cell,
            # then drop the leading separator space.
            law_no = ' '.join(
                t.get().strip() for t in row.xpath('./td[2]//text()')
            ).lstrip(' ')

            for anchor in row.xpath('./td[1]//a'):
                # Title: concatenate the anchor's text fragments and normalise
                # double quotes to single quotes (as the original did).
                title = ' '.join(
                    t.get().strip() for t in anchor.xpath('.//text()')
                ).lstrip(' ')
                title = title.replace('"', "'")

                href = anchor.xpath('./@href').get()
                if href is None:
                    continue
                # BUGFIX: build the absolute URL with urljoin instead of the
                # previous 'http://' + domain + href concatenation, which
                # produced broken URLs for page-relative, absolute, or
                # protocol-relative hrefs.
                detail_url = response.urljoin(href.strip())
                # Only follow links that point at a PDF document (same
                # substring semantics as the original re.findall check).
                if '.pdf' in detail_url:
                    yield response.follow(anchor, callback=self.detailed, dont_filter=True,
                                          meta={'title': title, 'LawNo': law_no})

    def detailed(self, response):
        """Build and yield a ``MyFileItem`` for a fetched PDF detail URL.

        Expects ``title`` and ``LawNo`` in ``response.meta`` (set by `parse`).
        Non-200 responses are silently dropped, as before.
        """
        if response.status != 200:
            return

        item = MyFileItem()
        # Detail URL of the law document.
        detail_url = response.url
        # One md5 of the URL serves as both the download file name and the
        # system id (the original hashed the identical string twice).
        url_hash = hashlib.md5(detail_url.encode('utf-8')).hexdigest()

        item['detailUrl'] = detail_url
        # Download path.
        item['downloadUrl'] = detail_url
        # Download URL list (intentionally empty here).
        item['file_urls'] = ''
        # Law number forwarded from the listing page.
        law_no = response.meta['LawNo']

        item['country'] = 'Brunei'
        item['website'] = 'agcgovbn'
        item['modular'] = 'order'
        # Downloaded file format.
        item['ext'] = 'pdf'

        # Downloaded file name: 'f' + md5 of the URL (response.url is always
        # non-empty, but the original's empty-URL guard is preserved).
        item['fina'] = fina = 'f' + url_hash if detail_url else ''

        # Chapter name.
        item['chapNo'] = ''

        # Title forwarded from the listing page.
        title = response.meta['title']
        item['title'] = title
        # Original text content (not captured for PDF documents).
        item['htmls'] = ''
        item['htmlUrl'] = ''

        item['abstract'] = ''
        item['abstractUrl'] = ''

        item['LegalName'] = title
        # NOTE(review): 'Organizaation' reproduces the item field's existing
        # misspelled key — do not "fix" without changing MyFileItem too.
        item['Organizaation'] = ''
        item['PublishDate'] = ''
        item['EffectiveDate'] = ''
        item['SortA'] = 'LAWCOUNTRYWL'
        item['SortB'] = 'LANGUAGEYY'
        item['SortC'] = ''
        item['SortD'] = ''
        item['SORTE'] = ''
        item['SORTF'] = ''
        item['Keyword'] = ''
        item['SORTG'] = ''
        item['ChapNo'] = law_no
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = fina
        item['FileUrl'] = ''
        item['DownLoadUrl'] = detail_url
        item['DownLoadWebNameC'] = '文莱总检查院办公室'
        # NOTE(review): 'Brune' looks like a typo for 'Brunei', but the value
        # is kept byte-for-byte in case downstream consumers match on it.
        item['DownLoadWebNameE'] = "Attorney General Chambers - Laws of Brune"
        # Unique ID: md5 of the detail URL.
        item['SYSID'] = url_hash
        item['Website'] = 'Order'
        item['Isconversion'] = '0'
        item['Revisionmark'] = ''

        yield item
