import hashlib
import re
import time

import scrapy
from scrapy import signals
from scrapy.http import HtmlResponse
from selenium import webdriver
from selenium.webdriver.common.by import By

from ..items import MyFileItem


class Spider(scrapy.Spider):
    """Spider for constitution documents on krisdika.go.th (Office of the
    Council of State of Thailand).

    The law listing is rendered as a JavaScript tree, so a local Chrome
    browser driven by Selenium is used to expand tree nodes and to page
    through results; after each click the rendered page source is wrapped
    back into an ``HtmlResponse`` so normal Scrapy selectors apply.
    """
    name = 'tha_kris_cons'
    allowed_domains = ['krisdika.go.th']
    # Law/regulation listing URL (China-ASEAN legal information center source).
    start_urls = ['http://www.krisdika.go.th/web/guest/law?p_p_id=LawPortlet_INSTANCE_qr7aikT1ls4G&p_p_state=normal'
                  '&p_p_mode=view&_LawPortlet_INSTANCE_qr7aikT1ls4G_javax.portlet.action=selectLawTypeMenu'
                  '&_LawPortlet_INSTANCE_qr7aikT1ls4G_lawTypeId=1&p_auth=QKyH3td2&p_p_lifecycle=0']
    id = 0  # NOTE(review): appears unused and shadows builtin id(); kept for compatibility
    Current_page = ''  # page number of the last crawled page; guards against pagination loops

    def __init__(self, **kwargs):
        super(Spider, self).__init__(**kwargs)
        self.driver = webdriver.Chrome()  # local Chrome browser

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super(Spider, cls).from_crawler(crawler, *args, **kwargs)
        # Ensure the browser is shut down when the spider finishes.
        crawler.signals.connect(spider.closeSpider, signals.spider_closed)
        return spider

    def closeSpider(self):
        # spider_closed handler: quit the browser exactly once.
        self.driver.quit()

    def parse(self, response):
        def driverclickcss(clickjs, sleep_time):
            """Click the element matching CSS selector *clickjs*, wait
            *sleep_time* seconds for the page to re-render, and return the
            rendered page wrapped in an HtmlResponse."""
            # find_element_by_css_selector() was removed in Selenium 4;
            # find_element(By.CSS_SELECTOR, ...) works on both 3.x and 4.x.
            self.driver.find_element(By.CSS_SELECTOR, clickjs).click()
            time.sleep(sleep_time)
            origin_code = self.driver.page_source
            # Fix: `request` must be a scrapy Request object, not a URL string.
            return HtmlResponse(url=response.url, encoding='utf8',
                                body=origin_code, request=response.request)

        def IsExpand(Metatr):
            """Return True if table row *Metatr* is an expandable tree node,
            i.e. it carries no document link in either known position."""
            try:
                if len(Metatr.xpath('./td[2]/a/@href')):
                    return False
                return not len(Metatr.xpath('./td[1]/div/span/a/@href'))
            except Exception as e:
                # Best effort: treat an unparseable row as expandable
                # (original behavior).
                print(str(e))
                return True

        def FindExpandJs(responsesejs, jstr, jstrS):
            """Find the CSS selector of the expand icon belonging to tree row
            *jstr* by matching its label text against every visible row in
            *jstrS*; return '' when no matching row is found."""
            downexpand = ''
            nodevalS = jstr.xpath('.//*[@id="nodeval"]/text()')
            if len(nodevalS):
                downexpand = nodevalS.get().strip()
            for i in range(len(jstrS)):
                # nth-child is 1-based with one extra leading row —
                # presumably a header row; verify against the live page.
                cell_js = ("#lawTreeGrid > div.objbox > table > tbody > tr:nth-child(%s) > "
                           "td:nth-child(1) > div > span" % str(i + 2))
                try:
                    cells = responsesejs.css(cell_js)
                except Exception as e:
                    print(str(e))
                    continue
                if len(cells):
                    textS = cells[0].xpath('./text()')
                    celltext = textS.get().strip() if len(textS) else ''
                    if celltext == downexpand:
                        return ("#lawTreeGrid > div.objbox > table > tbody > tr:nth-child(%s) > td:nth-child(1) > div "
                                "> img:nth-child(1)" % str(i + 2))
            return ''

        def detaileditem(detailUrl, ext, titletext):
            """Build a MyFileItem for one document row.

            detailUrl -- document URL (pdf/tif download, or plain page)
            ext       -- 'pdf', 'tif', or '' for rows with no file link
            titletext -- cleaned row title
            """
            item = MyFileItem()
            Isconversion = '0'
            # Compute the md5 digest once: it is both the unique system ID
            # and the basis of the download file name.
            digest = str(hashlib.md5(detailUrl.encode('utf-8')).hexdigest())
            systemid = digest
            fina = 'f' + digest if len(detailUrl) > 0 else ''  # download file name
            file_url = ''
            downloadUrl = ''
            if ext == 'pdf':
                downloadUrl = detailUrl
            elif ext == 'tif':
                file_url = detailUrl
            elif not ext:
                fina = ''  # no file attached to this row

            item['file_urls'] = [file_url] if file_url else ''
            item['country'] = 'Thailand'
            item['website'] = 'Krisdika'
            item['modular'] = 'Constitution'
            item['ext'] = ext
            item['fina'] = fina
            item['title'] = titletext
            item['chapNo'] = ''
            item['detailUrl'] = ''
            item['downloadUrl'] = downloadUrl
            item['htmls'] = ''
            item['htmlUrl'] = ''
            item['abstract'] = ''
            item['abstractUrl'] = ''
            item['LegalName'] = titletext
            item['Organizaation'] = ''
            item['PublishDate'] = ''
            item['EffectiveDate'] = ''
            item['SortA'] = 'LAWCOUNTRYTG'
            item['SortB'] = 'LANGUAGETY'
            item['SortC'] = ''
            item['SortD'] = ''
            item['SORTE'] = ''
            item['SORTF'] = ''
            item['Keyword'] = ''
            item['SORTG'] = ''
            item['ChapNo'] = ''
            item['Articles'] = ''
            item['Chapter'] = ''
            item['Section'] = ''
            item['SYS_FLD_DIGITFILENAME'] = fina
            item['FileUrl'] = ''
            item['DownLoadUrl'] = detailUrl
            item['DownLoadWebNameC'] = '泰国国务委员会办公室'
            item['DownLoadWebNameE'] = "Office of state council of thailand"
            item['SYSID'] = systemid
            item['Website'] = 'Krisdika Constitution'
            item['Isconversion'] = Isconversion
            item['Revisionmark'] = ''
            return item

        def process_rows():
            """Walk the (up to 20) visible tree rows of the current page:
            expand folder nodes in place via the browser and yield an item
            for every document row.  Rebinds the enclosing `resresponse`
            whenever an expansion re-renders the page.

            This replaces two verbatim copies of the same loop in the
            original (first page vs. paginated pages).
            """
            nonlocal resresponse
            trs = resresponse.xpath('//*[@id="lawTreeGrid"]/div[2]/table/tbody/tr')
            for tri in range(1, 21):  # row 0 is skipped (non-data row)
                try:
                    tr = trs[tri]
                except IndexError:
                    # Fix: the original fell through with a stale `tr` here
                    # and re-processed the previous row; fewer than 20 rows
                    # simply means the page is exhausted.
                    break
                if IsExpand(tr):
                    expand_js = FindExpandJs(resresponse, tr, trs)
                    if len(expand_js):
                        resresponse = driverclickcss(expand_js, 2)
                        trs = resresponse.xpath('//*[@id="lawTreeGrid"]/div[2]/table/tbody/tr')
                    continue

                # Classify the row's link(s) as pdf / tif / plain page.
                pdfdetailUrl = ''
                tifdetailUrl = ''
                emptydetailUrl = ''
                detail_links = tr.xpath('./td[2]/a')
                if len(detail_links):
                    for link in detail_links:
                        hrefS = link.xpath('./@href')
                        href = hrefS.get().strip() if len(hrefS) else ''
                        if re.findall(r'''.=pdf''', href, re.S):
                            pdfdetailUrl = href
                        elif re.findall(r'''.=tif''', href, re.S):
                            tifdetailUrl = href
                else:
                    fallback = tr.xpath('./td[1]/div/span/a/@href')
                    if len(fallback):
                        emptydetailUrl = fallback.get().strip()

                # Title: concatenate all text nodes of the first cell.
                title = ''
                titleS = tr.xpath('./td[1]//text()')
                if len(titleS):
                    for t in titleS:
                        title = title + ' ' + t.get().strip()
                    title = re.sub(r'''^ *''', r'''''', title)  # drop leading spaces

                if len(pdfdetailUrl):
                    yield detaileditem(pdfdetailUrl, 'pdf', title)
                elif len(tifdetailUrl):
                    yield detaileditem(tifdetailUrl, 'tif', title)
                elif len(emptydetailUrl):
                    yield detaileditem(emptydetailUrl, '', title)

        resresponse = response
        for item in process_rows():
            yield item

        # Pagination: click the "next page" arrow while it exists and the
        # reported page number keeps changing.
        next_page_js = '#lawPagingArea div div:nth-child(4) img'
        while resresponse.css(next_page_js):
            resresponse = driverclickcss(next_page_js, 2)
            # Read the current page number; when it stops changing we are on
            # the last page and must break to avoid looping forever.
            Current_pagenow = ''
            pageS = resresponse.css('#lawPagingArea > div > div:nth-child(6) > div')
            if len(pageS):
                pagetext = pageS[0].xpath('.//text()')
                if len(pagetext):
                    Current_pagenow = pagetext.get().strip()
            if Current_pagenow == self.Current_page:
                break
            self.Current_page = Current_pagenow

            for item in process_rows():
                yield item
        # Fix: the original called self.driver.close() here, which combined
        # with driver.quit() in closeSpider() double-closed the browser;
        # teardown is now handled solely by the spider_closed signal.

