import hashlib
import re
import time

import scrapy
from scrapy import signals
from scrapy.http import HtmlResponse
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By

from ..items import MyFileItem


class Spider(scrapy.Spider):
    """Crawl the China-ASEAN treaty listings on asean.pkulaw.cn.

    The list pages are rendered with JavaScript, so a shared headless
    Chrome (Selenium) instance fetches them; the rendered HTML is then
    wrapped in a Scrapy ``HtmlResponse`` so normal selectors can be used.
    """

    name = 'aseanlawinfo'
    # Scrapy expects a list of bare domain names (no scheme) here; the
    # original string form broke offsite filtering and made
    # ``allowed_domains[0]`` return the single character 'h'.
    allowed_domains = ['asean.pkulaw.cn']
    # Scheme + host used to absolutize relative hrefs scraped from pages.
    base_url = 'http://asean.pkulaw.cn'
    # China-ASEAN international treaties listing (Chinese UI).
    start_urls = ['http://asean.pkulaw.cn/List?categoryId=002&language=zh-CN']
    id = 0
    Current_page = ''  # text of the active pagination tab seen last

    # CSS selector for the trailing pagination anchor ("next page").
    _NEXT_CSS = '#div_articleList > div > ul > li:last-child> a'

    def __init__(self, **kwargs):
        """Start the headless Chrome instance shared by all requests."""
        super(Spider, self).__init__(**kwargs)
        chrome_options = Options()
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--headless')
        # ``chrome_options=`` was deprecated in Selenium 3.8; ``options=``
        # is the supported keyword and behaves identically.
        self.driver = webdriver.Chrome(options=chrome_options)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Connect spider_closed so the browser is shut down with the spider."""
        spider = super(Spider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.closeSpider, signals.spider_closed)
        return spider

    def closeSpider(self):
        """Quit the shared Selenium browser (spider_closed handler)."""
        self.driver.quit()

    @staticmethod
    def _join_text(selectors):
        """Join the stripped text of every selector node with single spaces."""
        return ' '.join(node.get().strip() for node in selectors).strip()

    def _render(self, url_for_response):
        """Wrap the browser's current page source in an ``HtmlResponse``."""
        return HtmlResponse(url=url_for_response, encoding='utf8',
                            body=self.driver.page_source,
                            request=url_for_response)

    def _next_link_text(self, rendered):
        """Return the stripped text of the pagination anchor, or ''.

        The original indexed ``css(...)[0]`` and called ``.get().strip()``
        unconditionally, raising when the anchor or its text was absent.
        """
        links = rendered.css(self._NEXT_CSS)
        if not links:
            return ''
        return (links[0].xpath('./text()').get() or '').strip()

    def _follow_rows(self, rendered, response, country):
        """Yield a detail-page request for every treaty row in *rendered*.

        rendered -- HtmlResponse built from the Selenium page source
        response -- original Scrapy response (used for ``.follow``)
        country  -- country name extracted from the landing page
        """
        for row in rendered.xpath('//*[@id="div_articleList"]/ul/li'):
            hrefs = row.xpath('.//a/@href')
            if not hrefs:
                continue  # rows without a link carry no treaty
            link = row.xpath('.//a')[0]
            # NOTE: the original built 'https://' + allowed_domains[0] with
            # a *string* allowed_domains, yielding 'https://h...' — broken.
            detail_url = self.base_url + hrefs.get().strip()
            title = self._join_text(row.xpath('.//a/text()'))
            stateparty = self._join_text(row.xpath('./ul/li/p/text()'))
            yield response.follow(
                link, callback=self.detailed, dont_filter=True,
                meta={'detailUrl': detail_url, 'country': country,
                      'title': title, 'stateparty': stateparty})

    def parse(self, response):
        """Visit each country tab, render its paginated list, follow rows."""
        country_links = response.xpath('//*[@class = "kLevelOne111"]').xpath('.//a')
        for ctr in country_links:
            country = self._join_text(ctr.xpath('.//text()'))
            if country == '中国':
                continue  # domestic Chinese law is out of scope here
            model_urls = ctr.xpath('./@href')
            if not model_urls:
                continue
            self.driver.get(self.base_url + model_urls.get().strip())
            time.sleep(2)  # crude wait for the JS-driven list to render
            rendered = self._render(response.url)

            for request in self._follow_rows(rendered, response, country):
                yield request

            # Pagination: keep clicking "下一页" until the active page number
            # stops changing (the last page re-renders itself on click).
            # The original read the anchor from the *unrendered* response.
            next_text = self._next_link_text(rendered)
            while next_text == '下一页':
                self.driver.find_element(By.CSS_SELECTOR, self._NEXT_CSS).click()
                time.sleep(2)
                rendered = self._render(response.url)

                current = ''
                active = rendered.xpath(
                    '//*[@id="div_articleList"]/div/ul/li[@class="active"]')
                if active:
                    texts = active[0].xpath('.//text()')
                    if texts:
                        current = texts.get().strip()
                if current == self.Current_page:
                    break  # page did not advance — nothing new to scrape
                self.Current_page = current

                for request in self._follow_rows(rendered, response, country):
                    yield request
                # Re-read the anchor once per page (the original redundantly
                # re-queried it inside the per-row loop).
                next_text = self._next_link_text(rendered)

    def detailed(self, response):
        """Parse one treaty detail page into a populated ``MyFileItem``."""
        if response.status != 200:
            return

        # Site country name -> internal sort code / English path segment.
        # Direct indexing (not .get) keeps unexpected countries loud.
        COUNTRY = {'中国': 'LAWCOUNTRYZG', '新加坡': 'LAWCOUNTRYXJP', '菲律宾': 'LAWCOUNTRYFLB',
                   '文莱': 'LAWCOUNTRYWL', '柬埔寨': 'LAWCOUNTRYJPZ', '印度尼西亚': 'LAWCOUNTRYYDNXY',
                   '越南': 'LAWCOUNTRYYN', '马来西亚': 'LAWCOUNTRYMLXY', '缅甸': 'LAWCOUNTRYMD',
                   '老挝': 'LAWCOUNTRYLW', '泰国': 'LAWCOUNTRYTG'}
        COUNTRYpath = {'新加坡': 'Singapore', '菲律宾': 'Philippines',
                       '文莱': 'Brunei', '柬埔寨': 'Cambodia', '印度尼西亚': 'Indonesia',
                       '越南': 'Vietnam', '马来西亚': 'Malaysia', '缅甸': 'Myanmar',
                       '老挝': 'Laos', '泰国': 'Thailand'}

        countryl = response.meta['country']
        SortA = COUNTRY[countryl]
        country = COUNTRYpath[countryl]
        title = response.meta['title']
        statepartyS = response.meta['stateparty']
        detailUrl = response.url
        downloadUrl = detailUrl

        # The state party is the text before the first '/'.  The original
        # regex-findall + [0] raised IndexError when no '/' was present;
        # partition falls back to the whole string instead.
        stateparty = statepartyS.partition('/')[0]

        # Full treaty text: concatenated outer HTML of every content node.
        detail = ' '.join(
            node.get().strip()
            for node in response.xpath('//*[@class="content"]'))

        # Publication date shown in the source-info line.
        publishDate = self._join_text(
            response.xpath('//*[@class="laiyuanxinxi"]/span[1]/text()'))

        # One MD5 of the detail URL serves as both file name and unique ID.
        digest = hashlib.md5(detailUrl.encode('utf-8')).hexdigest()
        fina = 'f' + digest if detailUrl else ''
        systemid = digest

        if not detail:
            return  # nothing to store without a treaty body

        item = MyFileItem()
        item['file_urls'] = ''
        item['country'] = country
        item['website'] = 'asean'
        item['modular'] = 'treaty'
        item['ext'] = 'pdf'
        item['fina'] = fina
        item['title'] = title
        item['detail'] = detail
        item['detailUrl'] = ''
        item['downloadUrl'] = ''

        item['Title'] = title
        item['SortA'] = SortA
        item['StateParty'] = stateparty
        item['PublishDate'] = publishDate
        item['EffectiveDate'] = ''
        item['FullText'] = ''
        item['SortB'] = 'LANGUAGEZW'
        item['SortC'] = ''
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = fina
        item['FileUrl'] = ''
        item['DownLoadUrl'] = downloadUrl
        item['DownLoadWebNameC'] = '中国-东盟国家法律与司法信息中心'
        item['DownLoadWebNameE'] = "LEGAL AND JUDICIAL INFORMATION CENTER FOR CHINA-ASEAN COUNTRIES"
        item['SYSID'] = systemid
        item['Website'] = 'aseanlawinfo'
        item['Isconversion'] = '1'

        yield item
