import hashlib

import scrapy
from scrapy import signals
from scrapy.http import HtmlResponse
from selenium.webdriver.chrome.options import Options

from ..items import MyFileItem
import re
import json
from selenium import webdriver
import time


class Mysagc (scrapy.Spider):
    """Spider for the Laws of Malaysia (LOM) database on the Malaysian
    Attorney General's Chambers portal.

    The listing table is paginated with JavaScript and its detail links open
    new browser windows, so a single headless Chrome instance (Selenium) is
    shared across the crawl: it clicks the "Next" pagination button and the
    per-row detail links, and the resolved URLs are fed back into Scrapy via
    ``response.follow`` to be parsed by :meth:`detailed`.
    """

    name = 'mys_agc_lom'
    # NOTE(review): this entry holds "domain + path", not a bare domain, and
    # is also reused below as the base when rebuilding absolute detail URLs.
    # Confirm OffsiteMiddleware filtering still behaves as intended.
    allowed_domains = ['agc.gov.my/agcportal/']
    # Entry point: page 1 of the Laws of Malaysia database listing.
    start_urls = ['http://www.agc.gov.my/agcportal/index.php?r=portal2/lom&menu_id=b21XYmExVUhFOE4wempZdE1vNUVKdz09&page=1']
    # NOTE(review): shadows the builtin ``id`` and appears unused in this file.
    id = 0
    # Page number shown by the pagination widget on the most recently handled
    # page; when a "Next" click leaves it unchanged, the last page is reached.
    Current_page = ''

    def __init__(self, **kwargs):
        """Start the single headless Chrome instance shared by the spider."""
        super(Mysagc, self).__init__(**kwargs)
        chrome_options = Options()  # run Chrome in the background, no UI
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--headless')
        # FIX: ``chrome_options=`` is deprecated since Selenium 3.8 and was
        # removed in Selenium 4; ``options=`` is the supported keyword.
        self.driver = webdriver.Chrome(options=chrome_options)  # local Chrome

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Hook the ``spider_closed`` signal so the browser is shut down."""
        spider = super(Mysagc, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.closeSpider, signals.spider_closed)
        return spider

    def closeSpider(self):
        """Signal handler for ``spider_closed``: quit the shared browser."""
        self.driver.quit()

    def parse(self, response):
        """Walk the paginated LOM listing table.

        For every table row, click its detail link in the Selenium browser to
        resolve the real detail URL (the link opens a new window), then follow
        that URL with Scrapy into :meth:`detailed`.  Afterwards keep clicking
        the "Next" pagination link and repeat, until either no "Next" link
        exists or the current page number stops changing.
        """
        def driverclickxpath(clickxpatn, sleep_time):
            """Click the element at ``clickxpatn`` (next-page button), wait
            ``sleep_time`` seconds, and wrap the refreshed page source in an
            HtmlResponse so it can be queried with XPath."""
            self.driver.find_element_by_xpath(clickxpatn).click()
            time.sleep(sleep_time)
            origin_code = self.driver.page_source
            # NOTE(review): ``request=`` normally expects a Request object;
            # a plain URL string is passed here -- confirm downstream use.
            res = HtmlResponse(url=response.url, encoding='utf8', body=origin_code, request=response.url)
            return res

        def driverclickxpathd(clickxpatn, sleep_time):
            """Click the detail link at ``clickxpatn`` (opens a new window),
            capture that window's URL, close it, and return the listing page
            wrapped in an HtmlResponse whose ``url`` is the detail URL."""
            self.driver.find_element_by_xpath(clickxpatn).click()
            time.sleep(sleep_time)
            Windowshandle = self.driver.window_handles
            try:
                # The detail link opens a second window; read its URL there.
                self.driver.switch_to.window(Windowshandle[1])
            except Exception as e:
                # Best-effort: if no second window appeared, fall back to the
                # current window's URL instead of aborting the whole page.
                self.logger.error('could not switch to detail window: %s', e)
            nowurl = self.driver.current_url
            self.driver.close()
            self.driver.switch_to.window(Windowshandle[0])
            origin_code = self.driver.page_source
            res = HtmlResponse(url=nowurl, encoding='utf8', body=origin_code, request=response.url)
            return res

        def parse_listing_rows():
            """Yield one detail-page request per row of the listing table
            currently held in ``resresponse`` (shared via ``nonlocal``, since
            each detail click replaces it with a fresh snapshot)."""
            nonlocal resresponse
            trs = resresponse.xpath('//*[@id="tablelom"]/tbody/tr')
            for i in range(len(trs)):
                tr = trs[i]
                # Law title: may be split across several text nodes.
                titleS = tr.xpath('./td[2]/a//text()')
                title = ''
                if len(titleS):
                    for titleSl in titleS:
                        title = title + ' ' + titleSl.get().strip()
                    title = title.lstrip(' ')        # drop the leading space
                    title = title.replace('"', "'")  # double -> single quotes
                # Absolute detail URL reconstructed from the row's href.
                detailUrlS = tr.xpath('./td[2]/a/@href')
                detailUrl = ''
                if len(detailUrlS):
                    # NOTE(review): allowed_domains[0] carries a trailing
                    # path; confirm the concatenation forms a valid URL.
                    detailUrl = 'https://' + self.allowed_domains[0] + detailUrlS.get().strip()
                if len(detailUrl):
                    # Click the row's link in the browser to resolve the real
                    # target URL, which the raw href alone does not give.
                    detailxpath = '//*[@id="tablelom"]/tbody/tr[%s]/td[2]/a' % str(i + 1)
                    resresponse = driverclickxpathd(detailxpath, 2)
                    detailUrl = resresponse.url
                    yield response.follow(detailUrl, callback=self.detailed, dont_filter=True,
                                          meta={'title': title, 'detailUrl': detailUrl})

        # First page of the listing.
        resresponse = response
        yield from parse_listing_rows()

        # Pagination: keep clicking "Next" until it disappears or stops
        # advancing the page number.
        nextaS = resresponse.xpath('//*[@id="tablelom"]/tbody/tr[last()]/td/div/a')
        while len(nextaS):
            # Locate the "Next" anchor among the pagination links.
            nextn = None
            for j in range(len(nextaS)):
                nexttextS = nextaS[j].xpath('./text()')
                if len(nexttextS) and nexttextS.get().strip() == "Next":
                    nextn = j
            if nextn is None:
                # FIX: previously ``nextn`` was unbound (NameError) on the
                # first pass, or stale from an earlier page, whenever no
                # anchor was labelled "Next"; stop paginating instead.
                break
            nextxpath = '//*[@id="tablelom"]/tbody/tr[last()]/td/div/a[%s]' % str(nextn + 1)
            resresponse = driverclickxpath(nextxpath, 30)
            # The element with class="current" carries the page number; if it
            # did not change, the "Next" click had no effect -> last page.
            Current_pagenow = ''
            Current_pageS = resresponse.xpath('//*[@id="tablelom"]/tbody/tr[last()]/td/div/*[@class="current"]')
            if len(Current_pageS):
                Current_pageSStr = Current_pageS[0].xpath('.//text()')
                if len(Current_pageSStr):
                    Current_pagenow = Current_pageSStr.get().strip()
            if Current_pagenow == self.Current_page:
                break
            self.Current_page = Current_pagenow
            yield from parse_listing_rows()
            nextaS = resresponse.xpath('//*[@id="tablelom"]/tbody/tr[last()]/td/div/a')

    def detailed(self, response):
        """Build one ``MyFileItem`` for a law's detail/download URL.

        ``title`` and ``detailUrl`` are carried over from :meth:`parse` via
        ``response.meta``; the item's file name and SYSID are derived from an
        MD5 hash of the URL, so re-crawls map to the same identifiers.
        """
        item = MyFileItem()
        if response.status == 200:

            # Detail URL and title of this law, passed through request meta.
            detailUrl = response.meta['detailUrl']
            title = response.meta['title']
            country = 'Malaysia'
            website = 'agcgov'
            modular = 'lom'
            # Format of the file to download.
            ext = 'pdf'
            # FIX: the MD5 of the URL was computed twice; hash once and reuse
            # for both the download file name and the unique SYSID.
            urlhash = hashlib.md5(detailUrl.encode('utf-8')).hexdigest()
            fina = ''       # download file name
            if len(detailUrl) > 0:
                fina = 'f' + urlhash
            systemid = urlhash  # unique ID

            if len(detailUrl):
                item['file_urls'] = ''
                item['country'] = country
                item['website'] = website
                item['modular'] = modular
                item['title'] = title
                item['ext'] = ext
                item['fina'] = fina
                item['chapNo'] = ''
                item['detailUrl'] = ''
                item['downloadUrl'] = detailUrl
                item['htmls'] = ''
                item['htmlUrl'] = ''
                item['abstract'] = ''
                item['abstractUrl'] = ''

                item['LegalName'] = title
                item['Organizaation'] = ''
                item['PublishDate'] = ''
                item['EffectiveDate'] = ''
                item['SortA'] = 'LAWCOUNTRYMLXY'
                item['SortB'] = 'LANGUAGEYY'
                item['SortC'] = ''
                item['SortD'] = ''
                item['SORTE'] = ''
                item['SORTF'] = ''
                item['Keyword'] = ''
                item['SORTG'] = ''
                item['ChapNo'] = ''
                item['Articles'] = ''
                item['Chapter'] = ''
                item['Section'] = ''
                item['SYS_FLD_DIGITFILENAME'] = fina
                item['FileUrl'] = ''
                item['DownLoadUrl'] = detailUrl
                item['DownLoadWebNameC'] = '马来西亚总检察院官网'
                item['DownLoadWebNameE'] = "Attorney General Chamber Official Portal"
                item['SYSID'] = systemid
                item['Website'] = 'Laws of Malaysia'
                item['Isconversion'] = '0'
                item['Revisionmark'] = ''

                yield item

