import hashlib
import time

import scrapy
from scrapy import signals
from scrapy.http import HtmlResponse
from selenium import webdriver

from ..items import MyFileItem
import re


class ThaSup(scrapy.Spider):
    """Crawl judgments of the Supreme Administrative Court of Thailand.

    The search page only renders results after a button click, so a local
    Chrome instance driven by Selenium is used to obtain the rendered HTML,
    which is then parsed with Scrapy selectors. Case detail pages and the
    final judgment (PDF) download are fetched through normal Scrapy requests.
    """

    name = 'tha_admincourt_super'
    # FIX: allowed_domains must list bare domains, not URLs with a path/query;
    # the previous value broke Scrapy's offsite filtering of followed links.
    allowed_domains = ['admincourt.go.th']
    # Search page of the Supreme Administrative Court of Thailand
    start_urls = ['http://www.admincourt.go.th/admincourt/site/05SearchSuit.html']
    id = 0  # kept for backward compatibility; not used by this spider

    # CSS selector of the "search" button that makes results appear
    _SEARCH_BUTTON_CSS = ('#ftxtsearchadvanc > div > table > tbody > '
                          'tr:nth-child(11) > td:nth-child(2) > input:nth-child(1)')
    # CSS selector of the "next page" link in the result pager
    _NEXT_PAGE_CSS = '#data-area > div:nth-child(3) > a:nth-child(15)'

    def __init__(self, **kwargs):
        """Start the local Chrome browser used to render the search pages."""
        super(ThaSup, self).__init__(**kwargs)
        options = webdriver.ChromeOptions()
        # Prefer a Chinese locale in the browser
        options.add_argument('lang=zh_CN.UTF-8')
        # Spoof a desktop Chrome user agent
        options.add_argument(
            'user-agent="Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36 "'
        )
        # options.add_argument('--no-sandbox')
        # options.add_argument('--disable-dev-shm-usage')
        # options.add_argument('--headless')
        # NOTE: chrome_options / find_element_by_* are Selenium 3 APIs; kept
        # deliberately for compatibility with the environment this runs in.
        self.driver = webdriver.Chrome(chrome_options=options)  # local Chrome

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Hook spider_closed so the browser is always shut down."""
        spider = super(ThaSup, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.closeSpider, signals.spider_closed)
        return spider

    def closeSpider(self):
        """Quit the Selenium browser when the spider closes."""
        self.driver.quit()

    def _rendered_response(self, url):
        """Wrap the browser's current DOM in an HtmlResponse for parsing.

        FIX: the previous code passed ``request=response.url`` (a string)
        where Scrapy expects a Request object; the argument is dropped.
        """
        return HtmlResponse(url=url, encoding='utf8', body=self.driver.page_source)

    @staticmethod
    def _row_title(tr):
        """Space-join all text nodes of a result row's anchor, trimmed."""
        parts = [t.get().strip() for t in tr.xpath('./a//text()')]
        return ' '.join(parts).strip()

    @staticmethod
    def _joined_text(response, xpath):
        """Space-join all text nodes under *xpath*, stripping leading blanks.

        Matches the original field formatting (each node trimmed, joined by
        single spaces, leading whitespace removed).
        """
        parts = [node.get().strip() for node in response.xpath(xpath)]
        return ' '.join(parts).lstrip(' ')

    # Parse the initial (Selenium-rendered) listing and paginate
    def parse(self, response):
        """Click search, then walk the result pages and follow each case link.

        Case rows only appear after the search button is clicked, so all page
        content is read from the Selenium browser rather than *response*.
        Years are Buddhist-era; 2556 BE corresponds to 2013 CE.
        """
        # Trigger the search; results render client-side.
        self.driver.find_element_by_css_selector(self._SEARCH_BUTTON_CSS).click()
        time.sleep(10)  # crude wait for the results to render
        listing = self._rendered_response(response.url)

        # First page: skip cases dated before B.E. 2556 (2013 CE).
        for tr in listing.xpath('//*[@id="data-area"]/div[@class="fts_headline"]'):
            title = self._row_title(tr)
            year = re.findall(r'''[0-9]{4}$''', title, re.S)
            if year and int(year[0]) < 2556:
                continue
            if tr.xpath('./a/@href'):  # only follow rows that link somewhere
                yield response.follow(tr.xpath('./a')[0],
                                      callback=self.modelparse, dont_filter=True)

        # Paginate while a "next page" link is present. (The old try/except
        # around .css() was dead code: css() never raises for a missing node.)
        page_number = 1
        keep_paging = True
        while keep_paging and listing.css(self._NEXT_PAGE_CSS):
            page_number += 1
            self.driver.find_element_by_css_selector(self._NEXT_PAGE_CSS).click()
            time.sleep(10)
            listing = self._rendered_response(response.url)
            for tr in listing.xpath('//*[@id="data-area"]/div[@class="fts_headline"]'):
                title = self._row_title(tr)
                year = re.findall(r'''[0-9]{4}$''', title, re.S)
                # NOTE(review): original behaviour preserved — past page 100 a
                # pre-2556 case stops pagination entirely, but on pages 2-100
                # such rows are still followed (unlike page 1). Confirm intent.
                if year and int(year[0]) < 2556 and page_number > 100:
                    keep_paging = False
                    break
                if tr.xpath('./a/@href'):
                    yield response.follow(tr.xpath('./a')[0],
                                          callback=self.modelparse, dont_filter=True)

    # Parse a case detail page
    def modelparse(self, response):
        """Extract the case metadata table and follow the download link."""
        detaiUrl = response.url
        # Case number
        CaseNumber = self._joined_text(response, '//*[@id="data-area"]/table//tr[1]//text()')
        # Adjudicating authority
        JudgAgency = self._joined_text(response, '//*[@id="data-area"]/table//tr[2]/td[2]//text()')
        # Appeal date
        CaseDate = self._joined_text(response, '//*[@id="data-area"]/table//tr[4]/td[2]//text()')
        # Plaintiff
        Prosecutor = self._joined_text(response, '//*[@id="data-area"]/table//tr[5]/td[2]//text()')
        # Defendant
        Defendant = self._joined_text(response, '//*[@id="data-area"]/table//tr[6]/td[2]//text()')
        # Parties
        People = Prosecutor + ' Vs ' + Defendant
        # Title
        Title = self._joined_text(response, '//*[@id="data-area"]/table//tr[7]/td[2]//text()')
        # Judgment download link
        if response.xpath('//*[@id="data-area"]/a/@href'):
            a = response.xpath('//*[@id="data-area"]/a')[0]
            yield response.follow(a, callback=self.detailed, dont_filter=True,
                                  meta={'CaseNumber': CaseNumber, 'JudgAgency': JudgAgency, 'CaseDate': CaseDate,
                                        'People': People, 'Title': Title, 'detaiUrl': detaiUrl})

    # Build the item from the download response
    def detailed(self, response):
        """Assemble the final MyFileItem from the download response + meta."""
        item = MyFileItem()
        if response.status == 200:
            # Judgment (PDF) download URL
            downloadUrl = response.url
            # Case detail page URL
            detailUrl = response.meta['detaiUrl']
            # Case number
            caseNumber = response.meta['CaseNumber']
            # Adjudicating authority
            judgAgency = response.meta['JudgAgency']
            # Case date
            caseDate = response.meta['CaseDate']
            # Parties
            people = response.meta['People']
            # Title; double quotes replaced with single quotes
            title = re.sub(r'''"''', r"'", response.meta['Title'])

            country = 'Thailand'
            website = 'Admincourt'
            modular = 'superadmincourt'
            # Download file format
            ext = 'pdf'

            # Stable download file name derived from the detail-page URL
            # (hexdigest() already returns str; the old str() wrapper dropped).
            fina = ''
            if detailUrl:
                fina = 'f' + hashlib.md5(detailUrl.encode('utf-8')).hexdigest()

            if downloadUrl:
                # Unique ID — NOTE: derived from detailUrl (same hash as fina
                # without the 'f' prefix), not from downloadUrl; original behaviour.
                systemid = hashlib.md5(detailUrl.encode('utf-8')).hexdigest()

                item['file_urls'] = ''
                item['country'] = country
                item['website'] = website
                item['modular'] = modular
                item['ext'] = ext
                item['fina'] = fina
                item['title'] = ''
                item['abstractUrl'] = ''
                item['abstract'] = ''
                item['dabstractUrl'] = ''
                item['detail'] = ''
                item['detailUrl'] = ''
                item['downloadUrl'] = downloadUrl

                item['Title'] = title
                item['CaseNumber'] = caseNumber
                item['KeyWord'] = ''
                item['SortA'] = 'LAWCOUNTRYTG'
                item['People'] = people
                item['CaseOfAction'] = ''
                item['UseLaw'] = ''
                item['AdjudicationDate'] = ''
                item['FullText'] = ''
                item['JudgAgency'] = judgAgency
                item['SortB'] = 'LANGUAGETY'
                item['SortC'] = ''
                item['CaseSummary'] = ''
                item['Articles'] = ''
                item['Chapter'] = ''
                item['Section'] = ''
                item['SYS_FLD_DIGITFILENAME'] = fina
                item['FileUrl'] = ''
                item['AbstractFileName'] = ''
                item['DownLoadUrl'] = detailUrl
                item['DownLoadWebNameC'] = '泰国最高行政法院'
                item['DownLoadWebNameE'] = "Supreme Administrative Court Of Thailand"
                item['SYSID'] = systemid
                item['Website'] = 'Super Admincourt'
                item['Isconversion'] = '0'
                item['CaseDate'] = caseDate

                yield item


