import hashlib

import scrapy
from ..items import MyFileItem
import re
import json


class vbpSpider(scrapy.Spider):
    """Spider for the Vietnamese legal normative documents database (vbpl.vn).

    Crawl flow:
      parse     -> follow each document-category link on the landing page
      websited  -> per category: yield a detail request for every listed
                   document, then follow the "next page" pagination link
      detailed  -> extract the full text of one document and yield a MyFileItem
    """
    name = 'vnm_vbp'
    allowed_domains = ['vbpl.vn']
    # Entry point: English listing of Vietnam's legal normative documents database.
    start_urls = ['http://vbpl.vn/TW/Pages/vbpqen.aspx']
    id = 0
    Current_page = ''

    @staticmethod
    def _joined_text(selectors):
        """Join the stripped, non-empty text fragments of a SelectorList with single spaces."""
        return ' '.join(t.strip() for t in selectors.getall() if t.strip())

    def parse(self, response):
        # Iterate the legal-document category list and follow each category link.
        categories = response.xpath('//*[@id="ctl00_ctl37_g_616a9790_c1c3_47ad_aa23_3897da912e9b"]/div[2]//ul['
                                    '@class="category"]/li')
        for category in categories:
            link = category.xpath('.//a')
            if not link:
                # Category entries without a link cannot be followed; skip them
                # (the original referenced a conditionally-bound variable here).
                continue
            # Category display name, carried through to the item as SORTE.
            website = self._joined_text(category.xpath('.//text()'))
            yield response.follow(link[0], callback=self.websited, dont_filter=True,
                                  meta={'website': website})

    def websited(self, response):
        """Parse one listing page: yield a detail request per document, then paginate."""
        website = response.meta['website']
        for row in response.xpath('//*[@id="tabVB_lv1"]/div[2]/ul/li'):
            publishDate = ''
            effectiveDate = ''
            revisionmark = ''
            # Document number, e.g. "123/2020/ND-CP".
            chapNo = self._joined_text(row.xpath('./div/p[@class="title"]//text()'))
            detail_link = row.xpath('./div/p[@class="title"]/a')
            # Document title; double quotes normalized to single quotes for storage.
            legalName = self._joined_text(row.xpath('./div/div[@class="left"]/*[@class="des"]//text()'))
            legalName = legalName.replace('"', "'")
            # The right-hand column holds labelled metadata rows ("Published:", ...).
            for meta_row in row.xpath('./div/div[@class="right"]/p'):
                label = meta_row.xpath('./label/text()').get(default='').strip()
                value = self._joined_text(meta_row.xpath('./text()'))
                if label == "Published:":
                    publishDate = value
                elif label == "Effective:":
                    effectiveDate = value
                elif label == "Status:":
                    revisionmark = value
            if detail_link:
                yield response.follow(detail_link[0], callback=self.detailed, dont_filter=True,
                                      meta={'website': website, 'chapNo': chapNo, 'legalName': legalName,
                                            'publishDate': publishDate, 'effectiveDate': effectiveDate,
                                            'revisionmark': revisionmark})
        # Pagination: follow the pager link whose number is exactly current page + 1.
        pagers = response.xpath('//*[@id="tabVB_lv1"]/div[2]/div/a')
        if pagers:
            current_text = response.xpath(
                '//*[@id="tabVB_lv1"]/div[2]/div/a[@class="current"]//text()').get(default='').strip()
            try:
                pagenow = int(current_text)
            except ValueError:
                # Bug fix: the original crashed on int('') when no "current" marker existed.
                self.logger.warning("切换下一页出错")
                return
            for pager in pagers:
                text = pager.xpath('./text()').get(default='').strip()
                try:
                    nextpage = int(text)
                except ValueError:
                    # Non-numeric pager entries ("Next", "...") are expected; the
                    # original's bare except printed an error for each of them.
                    continue
                if nextpage == pagenow + 1 and pager.xpath('./@href').get():
                    yield response.follow(pager, callback=self.websited, dont_filter=True,
                                          meta={'website': website})
                    break

    def detailed(self, response):
        """Parse one document detail page and yield a populated MyFileItem.

        Yields nothing when the response is not 200 or the full-text container
        is absent, matching the original's silent skip.
        """
        if response.status != 200:
            return
        detailUrl = response.url
        fulltext = response.xpath('//*[@id="ctl00_ctl37_g_43c11f15_c8a0_43a2_ba2b_652da6d240a1"]//div['
                                  '@class="content"]/div[@class="fulltext"]').get()
        if fulltext is None:
            return
        htmls = fulltext.strip()

        # Hash the detail URL once; it serves both as the file-name stem and
        # the unique record id (the original computed the same md5 twice).
        digest = hashlib.md5(detailUrl.encode('utf-8')).hexdigest()
        fina = 'f' + digest
        systemid = digest

        item = MyFileItem()
        item['file_urls'] = ''
        item['country'] = 'Vietnam'
        item['website'] = 'vbpl'
        item['modular'] = 'law'
        item['title'] = response.meta['legalName']
        item['ext'] = 'pdf'
        item['fina'] = fina
        item['chapNo'] = ''
        item['detailUrl'] = ''
        item['downloadUrl'] = ''
        item['htmls'] = htmls
        item['htmlUrl'] = ''
        item['abstract'] = ''
        item['abstractUrl'] = ''

        item['LegalName'] = response.meta['legalName']
        item['Organizaation'] = ''
        item['PublishDate'] = response.meta['publishDate']
        item['EffectiveDate'] = response.meta['effectiveDate']
        item['SortA'] = 'LAWCOUNTRYYN'
        item['SortB'] = 'LANGUAGEYY'
        item['SortC'] = ''
        item['SortD'] = ''
        item['SORTE'] = response.meta['website']
        item['SORTF'] = ''
        item['Keyword'] = ''
        item['SORTG'] = ''
        item['ChapNo'] = response.meta['chapNo']
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = fina
        item['FileUrl'] = ''
        item['DownLoadUrl'] = detailUrl
        item['DownLoadWebNameC'] = '越南规范文件法律数据库'
        item['DownLoadWebNameE'] = 'Legal Normative Documents'
        item['SYSID'] = systemid
        item['Website'] = 'Trung ương'
        item['Isconversion'] = '0'
        item['Revisionmark'] = response.meta['revisionmark']

        yield item