import hashlib

import scrapy
from ..items import MyFileItem
import re
import json


class vbpSpider(scrapy.Spider):
    """Spider for Myanmar's Pyithu Hluttaw normative legal-documents database.

    Crawls the paginated laws table at pyithuhluttaw.gov.mm, extracts the
    publish date, law number and law name from each table row, then follows
    the per-row detail link and yields one ``MyFileItem`` per law.
    """
    name = 'mmr_pyi'
    allowed_domains = ['pyithuhluttaw.gov.mm']
    # Myanmar normative legal documents database address
    start_urls = ['https://www.pyithuhluttaw.gov.mm/laws']
    id = 0            # kept for compatibility; no longer used to number items
    Current_page = '' # kept for compatibility; never read or written here

    @staticmethod
    def _cell_text(row, column):
        """Return all text nodes of ``./td[column]`` joined by single spaces.

        Each text fragment is stripped individually; leading spaces produced
        by empty fragments are removed, matching the original concatenation
        logic exactly. Returns '' when the cell has no text nodes.
        """
        pieces = [t.get().strip()
                  for t in row.xpath('./td[{}]//text()'.format(column))]
        if not pieces:
            return ''
        return ' '.join(pieces).lstrip(' ')

    def parse(self, response):
        """Parse one page of the laws table and follow detail links + pagination.

        Row metadata is forwarded to :meth:`detailed` via ``response.meta``.
        """
        rows = response.xpath(
            '//*[@id="block-gavias-vinor-content"]//table[@class="cols-5"]/tbody/tr')
        for row in rows:
            publish_date = self._cell_text(row, 2)  # effective date
            chap_no = self._cell_text(row, 3)       # law number
            # Law name; double quotes normalized to single quotes.
            legal_name = self._cell_text(row, 4).replace('"', "'")

            links = row.xpath('./td[5]//a')
            if links:
                yield response.follow(
                    links[0], callback=self.detailed, dont_filter=True,
                    meta={'publishDate': publish_date,
                          'chapNo': chap_no,
                          'legalName': legal_name})
        # Pagination: follow the "next" pager link, if present.
        for nxt in response.css(
                '#block-gavias-vinor-content nav ul li.pager__item.pager__item--next > a'):
            yield response.follow(nxt, callback=self.parse, dont_filter=True)

    # Parse the detail page.
    def detailed(self, response):
        """Build and yield a ``MyFileItem`` for one law's detail page.

        Uses the detail-page URL both as the download URL and as the source
        of the MD5-based unique IDs; the descriptive fields come from the
        listing row, carried in ``response.meta``.
        """
        if response.status != 200:
            return

        detail_url = response.url
        publish_date = response.meta['publishDate']
        chap_no = response.meta['chapNo']
        legal_name = response.meta['legalName']

        # Unique ID derived from the detail URL; compute the digest once
        # (hexdigest() already returns str).
        systemid = hashlib.md5(detail_url.encode('utf-8')).hexdigest()
        fina = 'f' + systemid

        item = MyFileItem()
        item['file_urls'] = ''
        item['country'] = 'Myanmar'
        item['website'] = 'PyithuHluttaw'
        item['modular'] = 'law'
        item['title'] = ''
        item['ext'] = 'pdf'
        item['fina'] = fina
        item['chapNo'] = ''
        item['detailUrl'] = ''
        item['downloadUrl'] = detail_url
        item['htmls'] = ''
        item['htmlUrl'] = ''
        item['abstract'] = ''
        item['abstractUrl'] = ''

        item['LegalName'] = legal_name
        item['Organizaation'] = ''
        item['PublishDate'] = publish_date
        item['EffectiveDate'] = ''
        item['SortA'] = 'LAWCOUNTRYMD'
        item['SortB'] = 'LANGUAGEMDY'
        item['SortC'] = ''
        item['SortD'] = ''
        item['SORTE'] = ''
        item['SORTF'] = ''
        item['Keyword'] = ''
        item['SORTG'] = ''
        item['ChapNo'] = chap_no
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = fina
        item['FileUrl'] = ''
        item['DownLoadUrl'] = detail_url
        item['DownLoadWebNameC'] = '缅甸Pyithu Hluttaw网站'
        item['DownLoadWebNameE'] = 'Law | Pyithu Hluttaw of Myanmar'
        item['SYSID'] = systemid
        item['Website'] = 'Pyithu Hluttaw'
        item['Isconversion'] = '0'
        item['Revisionmark'] = ''

        yield item

