import hashlib

import scrapy
from scrapy import signals
from selenium.webdriver.chrome.options import Options

from ..items import MyFileItem
import re
from selenium import webdriver
import time
from scrapy.http import response, HtmlResponse


class Ssosg(scrapy.Spider):
    """Crawl judgment records from the Brunei State Judiciary Department
    judgment-search page.

    The result tables are paginated via JavaScript postbacks, so a headless
    Chrome driven by Selenium is used for paging while Scrapy selectors do
    the actual extraction.
    """
    name = 'bru_jud_gov_judm'
    allowed_domains = ['judiciary.gov.bn']
    # Supreme Court of Brunei judgment search page
    start_urls = ['http://judiciary.gov.bn/SJD%20Site%20Pages/Judgment%20Search.aspx']
    id = 0

    def __init__(self, **kwargs):
        super(Ssosg, self).__init__(**kwargs)
        options = Options()
        options.add_argument('--no-sandbox')
        options.add_argument('--disable-dev-shm-usage')
        options.add_argument('--headless')
        # `chrome_options` is the selenium-3 keyword (selenium 4 renamed it to
        # `options`); kept because the pagination code below also relies on the
        # selenium-3 find_element_by_css_selector API.
        self.driver = webdriver.Chrome(chrome_options=options)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Hook spider_closed so the Selenium browser is always shut down."""
        spider = super(Ssosg, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.closeSpider, signals.spider_closed)
        return spider

    def closeSpider(self):
        """Signal handler: quit the Selenium browser when the spider closes."""
        self.driver.quit()

    @staticmethod
    def _cell_text(tr, col):
        """Join all text nodes of the ``col``-th <td> of row ``tr`` with
        single spaces, dropping leading spaces; '' when the cell is empty."""
        parts = [node.get().strip() for node in tr.xpath('./td[%d]//text()' % col)]
        return ' '.join(parts).lstrip(' ')

    def _parse_rows(self, page, response, trsid, judg_agency, sort_c):
        """Yield one detail-page request per judgment row of table ``trsid``.

        ``page`` supplies the selectors (the original response or a
        Selenium-rendered snapshot); ``response`` is always the original
        Scrapy response used to build follow-up requests.
        """
        for tr in page.xpath('//*[@id="%s"]/tr' % trsid):
            year = self._cell_text(tr, 2)       # judgment year
            title = self._cell_text(tr, 3)      # case title
            # case code: only the first text node is used
            casecode_nodes = tr.xpath('./td[4]//text()')
            casecode = casecode_nodes.get().strip() if len(casecode_nodes) else ''
            href_nodes = tr.xpath('./td[5]//a/@href')
            if not len(href_nodes):
                # no detail link -> nothing to follow for this row
                continue
            anchor = tr.xpath('./td[5]//a')[0]
            detail_url = 'https://' + self.allowed_domains[0] + href_nodes.get().strip()
            keyword = self._cell_text(tr, 6)    # keywords column
            hearer = self._cell_text(tr, 7)     # judge / hearer column
            yield response.follow(anchor, callback=self.detailed, dont_filter=True,
                                  meta={'judgAgency': judg_agency, 'sortC': sort_c,
                                        'year': year, 'title': title,
                                        'casecode': casecode, 'detailUrl': detail_url,
                                        'keyword': keyword, 'hearer': hearer})

    def parse(self, response):
        """Walk every court (organ) / case-type section, extract its judgment
        table, and page through the remaining result pages with Selenium.

        Fixes vs. the previous version: the paginated branch no longer applies
        the title leading-space cleanup to ``year`` instead of ``title``, the
        header split on ':' no longer crashes when no colon is present, and
        the duplicated row extraction is shared through _parse_rows().
        """
        # The page embeds a numeric context id (``ctx.ctxId = NNN``) that is
        # part of every element id we need to address.
        cid_matches = re.findall(r'(?<=ctx.ctxId = )[1-9][0-9]*', response.text, re.S)
        cid = cid_matches[0]
        # section suffixes: courts (-1_ .. -4_) and the case types each offers
        organ_types = {'-1_': ['1', '2'], '-2_': ['1', '2'],
                       '-3_': ['1', '2', '3'], '-4_': ['1']}
        for organ in ['-1_', '-2_', '-3_', '-4_']:
            for case_type in organ_types[organ]:
                organid = 'titl' + cid + organ
                typeid = 'titl' + cid + organ + case_type + '_'
                trsid = 'tbod' + cid + organ + case_type + '__'
                trspageid = 'tbod' + cid + organ + case_type + '__page'

                # court name: everything after the first ':' of the header
                judg_agency = ''
                agency_nodes = response.xpath('//*[@id="%s"]/tr/td/text()' % organid)
                if len(agency_nodes):
                    joined = ''.join(' ' + n.get().strip() for n in agency_nodes)
                    # partition() degrades to '' instead of the unguarded
                    # re_tr[0] IndexError of the old code when ':' is missing
                    judg_agency = joined.partition(':')[2]

                # case type: everything after the first '-' of the header,
                # or the raw joined text when no '-' is present
                sort_c = ''
                type_nodes = response.xpath('//*[@id="%s"]/tr/td/text()' % typeid)
                if len(type_nodes):
                    joined = ''.join(' ' + n.get().strip() for n in type_nodes)
                    _, sep, tail = joined.partition('-')
                    sort_c = tail if sep else joined

                # first result page comes straight from the Scrapy response
                for request in self._parse_rows(response, response, trsid,
                                                judg_agency, sort_c):
                    yield request

                # Pagination: click "next" in the Selenium browser and re-parse
                # the rendered HTML until no next-link remains.
                # NOTE(review): the spider never calls driver.get(); it assumes
                # something else (e.g. a downloader middleware) already
                # navigated the browser to this page -- confirm.
                next_css = ('tbody[id="%s"] tr td table tbody tr '
                            '#pagingWPQ7next a' % trspageid)
                next_link = response.css(next_css)
                while next_link:
                    self.driver.find_element_by_css_selector(next_css).click()
                    time.sleep(5)  # crude wait for the postback to render
                    # snapshot of the rendered DOM; the bogus request=<url-str>
                    # argument of the old code is dropped (a Request object is
                    # expected there, and it was never read anyway)
                    page = HtmlResponse(url=response.url, encoding='utf8',
                                        body=self.driver.page_source)
                    for request in self._parse_rows(page, response, trsid,
                                                    judg_agency, sort_c):
                        yield request
                    next_link = page.css(next_css)

    def detailed(self, response):
        """Build and yield a MyFileItem for one judgment detail page.

        All descriptive fields were extracted on the listing page and travel
        in ``response.meta``; this callback repackages them into an item.
        (meta['year'] and meta['hearer'] are carried along but, as before,
        never stored in the item.)
        """
        if response.status != 200:
            return
        detail_url = response.url
        # both the download file name and the unique id derive from the URL
        url_md5 = hashlib.md5(detail_url.encode('utf-8')).hexdigest()
        fina = ('f' + url_md5) if detail_url else ''

        item = MyFileItem()
        item['file_urls'] = ''
        item['country'] = 'Bruneis'
        item['website'] = 'judiciary'
        item['modular'] = 'case'
        item['ext'] = 'pdf'
        item['fina'] = fina
        item['title'] = response.meta['title']
        item['abstractUrl'] = ''
        item['abstract'] = ''
        item['dabstractUrl'] = ''
        item['detail'] = ''
        item['detailUrl'] = ''
        item['downloadUrl'] = detail_url

        item['Title'] = response.meta['title']
        item['CaseNumber'] = response.meta['casecode']
        item['KeyWord'] = response.meta['keyword']
        item['SortA'] = 'LAWCOUNTRYWL'
        item['People'] = ''
        item['CaseOfAction'] = ''
        item['UseLaw'] = ''
        item['AdjudicationDate'] = ''
        item['FullText'] = ''
        item['JudgAgency'] = response.meta['judgAgency']
        item['SortB'] = 'LANGUAGEYY'
        item['SortC'] = response.meta['sortC']
        item['CaseSummary'] = ''
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = fina
        item['FileUrl'] = ''
        item['AbstractFileName'] = ''
        item['DownLoadUrl'] = detail_url
        item['DownLoadWebNameC'] = '文莱最高法院'
        item['DownLoadWebNameE'] = "STATE JUDICIARY DEPARTMENT | BRUNEI DARUSSALAM"
        item['SYSID'] = url_md5
        item['Website'] = 'Judiciary Govement'
        item['Isconversion'] = '0'
        item['CaseDate'] = ''

        yield item
