import hashlib
import re
import time

import scrapy
from scrapy import signals
from scrapy.http import HtmlResponse
from selenium import webdriver
from selenium.webdriver.common.by import By

from ..items import MyFileItem


class Spider(scrapy.Spider):
    """Scrape the Lao Official Gazette law listing.

    The listing table is paginated by a JavaScript pager, so a local Chrome
    instance (Selenium) is used to click through the pages while Scrapy
    parses the rendered HTML.  Every table row yields a request to its
    detail page, which :meth:`detailed` turns into a ``MyFileItem``.
    """

    name = 'lao_gazette'
    allowed_domains = ['laoofficialgazette.gov.la']
    # Lao Official Gazette listing URL
    start_urls = ['http://www.laoofficialgazette.gov.la/index.php?r=site/list&old=0']
    id = 0
    Current_page = ''  # pager number of the most recently processed page

    # CSS selector for the pager's "next page" link.
    NEXT_PAGE_CSS = '#yw0 > li.next > a'
    # XPath for the rows of the law listing table.
    ROWS_XPATH = '//*[@id="article-grid"]/table[@class="items"]/tbody/tr'

    def __init__(self, **kwargs):
        super(Spider, self).__init__(**kwargs)
        self.driver = webdriver.Chrome()  # local Chrome browser for the JS pager

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super(Spider, cls).from_crawler(crawler, *args, **kwargs)
        # Make sure the browser is shut down when the crawl ends.
        crawler.signals.connect(spider.closeSpider, signals.spider_closed)
        return spider

    def closeSpider(self):
        """Handler for the ``spider_closed`` signal: quit the browser."""
        self.driver.quit()

    @staticmethod
    def _cell_text(row, xpath):
        """Return all text nodes matched by *xpath* under *row*, joined.

        Each node is stripped, the pieces are joined with single spaces and
        leading spaces are removed — reproducing the original accumulation
        loop (internal double spaces from empty nodes are kept as before).
        """
        parts = [node.get().strip() for node in row.xpath(xpath)]
        return ' '.join(parts).lstrip(' ')

    def _row_requests(self, table_response, follow_response):
        """Yield one detail-page request per row of the listing table.

        *table_response* supplies the rows (it may be a Selenium-rendered
        snapshot); *follow_response* is the original Scrapy response used
        to resolve the relative detail links.
        """
        for row in table_response.xpath(self.ROWS_XPATH):
            # Column layout: 1=law name, 2=issuing body, 3=publish date,
            # 4=effective date, 5=legislation type, 9=detail link.
            meta = {
                # Double quotes are converted to single quotes, as the
                # original did for the law name.
                'legalName': self._cell_text(row, './td[1]/text()').replace('"', "'"),
                'organizaation': self._cell_text(row, './td[2]/text()'),
                'publishDate': self._cell_text(row, './td[3]/text()'),
                'effectiveDate': self._cell_text(row, './td[4]/text()'),
                'sortF': self._cell_text(row, './td[5]/text()'),
            }
            # Only rows that actually link to a detail page are followed.
            if row.xpath('./td[9]/a/@href'):
                yield follow_response.follow(
                    row.xpath('./td[9]/a')[0], callback=self.detailed,
                    dont_filter=True, meta=meta)

    def parse(self, response):
        """Emit detail-page requests for every row of every listing page."""
        # First page comes straight from Scrapy's own download.
        for request in self._row_requests(response, response):
            yield request

        # Later pages only exist after clicking the JS pager, so drive the
        # real browser.  Point it at the listing page first — the driver is
        # never navigated anywhere else in this spider.
        self.driver.get(response.url)

        next_link = response.css(self.NEXT_PAGE_CSS)  # empty => single page
        while next_link:
            self.driver.find_element(By.CSS_SELECTOR, self.NEXT_PAGE_CSS).click()
            time.sleep(2)  # crude wait for the page to re-render
            rendered = HtmlResponse(
                url=response.url, encoding='utf8',
                body=self.driver.page_source, request=response.request)

            # Stop when the pager's highlighted page number stops changing
            # (clicking "next" on the last page is a no-op).
            current = (rendered.xpath(
                '//*[@id="yw0"]/li[@class="page selected"]//text()').get('') or '').strip()
            if current == self.Current_page:
                break
            self.Current_page = current

            for request in self._row_requests(rendered, response):
                yield request

            # Guarded lookup: no "next" link means we just did the last page.
            next_link = rendered.css(self.NEXT_PAGE_CSS)

    def detailed(self, response):
        """Build a ``MyFileItem`` for one law's detail page.

        Field values extracted from the listing row arrive via
        ``response.meta``; the detail URL doubles as the download URL and,
        hashed, as the record id / download file name.
        """
        if response.status != 200:
            return

        detail_url = response.url
        # One MD5 of the detail URL serves both as the unique record id and
        # (with an 'f' prefix) as the download file name.
        digest = hashlib.md5(detail_url.encode('utf-8')).hexdigest()
        # Normalise double quotes again in case meta was produced elsewhere.
        legal_name = response.meta['legalName'].replace('"', "'")

        item = MyFileItem()
        item['file_urls'] = ''
        item['country'] = 'Laos'
        item['website'] = 'laogazette'
        item['modular'] = 'law'
        item['ext'] = 'pdf'
        item['fina'] = 'f' + digest
        item['title'] = ''
        item['chapNo'] = ''
        item['detailUrl'] = ''
        item['downloadUrl'] = detail_url
        item['htmls'] = ''
        item['htmlUrl'] = ''
        item['abstract'] = ''
        item['abstractUrl'] = ''

        item['LegalName'] = legal_name
        item['Organizaation'] = response.meta['organizaation']
        item['PublishDate'] = response.meta['publishDate']
        item['EffectiveDate'] = response.meta['effectiveDate']
        item['SortA'] = 'LAWCOUNTRYLW'
        item['SortB'] = 'LANGUAGELWY'
        item['SortC'] = ''
        item['SortD'] = ''
        item['SORTE'] = ''
        item['SORTF'] = response.meta['sortF']
        item['Keyword'] = ''
        item['SORTG'] = ''
        item['ChapNo'] = ''
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = 'f' + digest
        item['FileUrl'] = ''
        item['DownLoadUrl'] = detail_url
        item['DownLoadWebNameC'] = '老挝官方公报'
        item['DownLoadWebNameE'] = 'Lao Official Gazette'
        item['SYSID'] = digest
        item['Website'] = 'Laogazette - List Site'
        item['Isconversion'] = '0'
        item['Revisionmark'] = ''
        yield item

