#!/usr/bin/env python
# encoding: utf-8
"""
#-------------------------------------------------------------------#
#                   CONFIDENTIAL --- CUSTOM STUDIOS                 #     
#-------------------------------------------------------------------#
#                                                                   #
#                   @Project Name : Globallawonline                #
#                                                                   #
#                   @File Name    : asean.py                      #
#                                                                   #
#                   @Programmer   : 李建                            #
#                                                                   #  
#                   @Start Date   : 2021/4/7 15:57                 #
#                                                                   #
#                   @Last Update  : 2021/4/7 15:57                 #
#                                                                   #
#-------------------------------------------------------------------#
# Classes:东盟网站的法律数据采集                                                          #
#                                                                   #
#-------------------------------------------------------------------#
"""

import hashlib
import time

import scrapy
from scrapy import signals
from scrapy.http import HtmlResponse
from selenium import webdriver

from ..items import MyFileItem
import re
import json


class Spider(scrapy.Spider):
    """Collects ASEAN legal-instrument metadata from agreement.asean.org.

    The first result page is fetched by Scrapy.  Later pages are reached by
    clicking the "next" pagination link in a local Selenium-driven Chrome
    browser (the result table is updated by JavaScript); the rendered DOM is
    wrapped in an ``HtmlResponse`` and re-parsed with the same row logic.
    """

    name = 'asean_law'
    # Bare host name only: a full URL here would make Scrapy's
    # OffsiteMiddleware filter out every follow-up request.
    allowed_domains = ['agreement.asean.org']
    # ASEAN legal instruments index page.
    start_urls = ['http://agreement.asean.org/home/index.html']
    id = 0            # running counter of items emitted by this spider
    Current_page = ''

    def __init__(self, **kwargs):
        super(Spider, self).__init__(**kwargs)
        # Local Chrome instance, used only to drive JavaScript pagination.
        self.driver = webdriver.Chrome()

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Hook spider_closed so the browser is shut down with the spider."""
        spider = super(Spider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.closeSpider, signals.spider_closed)
        return spider

    def closeSpider(self):
        """spider_closed handler: close the Selenium browser."""
        self.driver.quit()

    @staticmethod
    def _cell_text(tr, xpath):
        """Return all text nodes matched by *xpath* (relative to *tr*),
        each stripped, joined with single spaces, then stripped again."""
        parts = [node.get().strip() for node in tr.xpath(xpath)]
        return ' '.join(parts).strip()

    def _parse_rows(self, page, base_response):
        """Yield detail-page requests for every qualifying row of the table.

        *page* supplies the rows (it may be a Selenium-built HtmlResponse);
        *base_response* is used for URL joining via ``follow``.
        """
        for tr in page.xpath('//*[@id="table_id"]/tbody/tr'):
            # Locate the first column holding a sequence number ("1.", "2.",
            # ...).  Rows without a number are not collected.
            n = 0
            for i in range(1, 5):
                number = ''.join(
                    node.get().strip() for node in tr.xpath('./td[%d]/text()' % i))
                if re.search(r'\d\.', number):
                    n = i
                    break
            if n == 0:
                continue
            legal_name = self._cell_text(tr, './td[%d]/a/text()' % (n + 1))
            legal_name = re.sub(r'"', r"'", legal_name)  # double -> single quotes
            publish_date = self._cell_text(tr, './td[%d]/text()' % (n + 2))
            effective_date = self._cell_text(tr, './td[%d]/text()' % (n + 4))
            # Skip treaties flagged "not required".
            required = self._cell_text(tr, './td[%d]/text()' % (n + 3))
            if re.search('not required', required, re.I):
                continue
            # Skip instruments that are no longer in force.
            force_nodes = tr.xpath('./td[%d]/div/a/text()' % (n + 4))
            force = force_nodes.get().strip() if len(force_nodes) else ''
            if not re.search(r'In Force \(IF\)', force, re.I):
                continue
            links = tr.xpath('./td[%d]/a' % (n + 1))
            if len(links):
                yield base_response.follow(
                    links[0], callback=self.detailed, dont_filter=True,
                    meta={'legalName': legal_name,
                          'publishDate': publish_date,
                          'effectiveDate': effective_date})

    def parse(self, response):
        """Parse the first result page, then paginate with Selenium clicks."""
        yield from self._parse_rows(response, response)

        # "Next" pagination link: the <li> following the active one.
        next_css = ('#search_result > div > div.pagination.pagination-centered'
                    ' > ul > li.active~li > a')
        # NOTE(review): the driver is assumed to already display the index
        # page (e.g. loaded by a downloader middleware) -- confirm; otherwise
        # a driver.get(response.url) is required before the first click.
        page = response
        while len(page.css(next_css)):
            # NOTE(review): find_element_by_css_selector is removed in
            # Selenium 4; migrate to find_element(By.CSS_SELECTOR, ...) when
            # the project upgrades.
            self.driver.find_element_by_css_selector(next_css).click()
            time.sleep(2)  # give the JavaScript time to render the next page
            # BUG FIX: evaluate the loop condition against the freshly
            # rendered page; the original re-checked the first response, so
            # the condition never changed.
            page = HtmlResponse(url=response.url, encoding='utf8',
                                body=self.driver.page_source)
            yield from self._parse_rows(page, response)

    def detailed(self, response):
        """Build a MyFileItem from a detail page plus the metadata the
        listing parser passed through ``meta``."""
        if response.status != 200:
            return
        self.id += 1  # keep the running item counter in step

        detail_url = response.url
        legal_name = response.meta['legalName']
        publish_date = response.meta['publishDate']
        effective_date = response.meta['effectiveDate']

        url_md5 = str(hashlib.md5(detail_url.encode('utf-8')).hexdigest())
        fina = 'f' + url_md5   # file-name stem derived from the URL
        systemid = url_md5     # unique record id

        item = MyFileItem()
        item['file_urls'] = ''
        item['country'] = 'Asean'
        item['website'] = 'asean'
        item['modular'] = 'law'
        item['title'] = legal_name
        item['ext'] = 'pdf'
        item['fina'] = fina
        item['chapNo'] = ''
        item['detailUrl'] = ''
        item['downloadUrl'] = detail_url
        item['htmls'] = ''
        item['htmlUrl'] = ''
        item['abstract'] = ''
        item['abstractUrl'] = ''

        item['LegalName'] = legal_name
        item['Organizaation'] = ''
        item['PublishDate'] = publish_date
        item['EffectiveDate'] = effective_date
        item['SortA'] = 'LAWCOUNTRYDM'
        item['SortB'] = 'LANGUAGEYY'
        item['SortC'] = ''
        item['SortD'] = ''
        item['SORTE'] = ''
        item['SORTF'] = ''
        item['Keyword'] = ''
        item['SORTG'] = ''
        item['ChapNo'] = ''
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = fina
        item['FileUrl'] = ''
        item['DownLoadUrl'] = detail_url
        item['DownLoadWebNameC'] = '东盟法律文书'
        item['DownLoadWebNameE'] = 'ASEAN Legal Instruments'
        item['SYSID'] = systemid
        item['Website'] = 'ASEAN'
        item['Isconversion'] = '0'
        item['Revisionmark'] = ''

        yield item


