#!/usr/bin/env python
# encoding: utf-8
import hashlib
import time

import scrapy
from scrapy import signals
from scrapy.http import HtmlResponse
from selenium import webdriver

from ..items import MyFileItem
import re


class ThaSup(scrapy.Spider):
    """Spider for judgements of the Supreme Court of Thailand.

    Drives a real Chrome browser through Selenium (the search results are
    rendered client-side), submits a search starting at Buddhist year 2556
    (2013 CE), and yields one ``MyFileItem`` per case whose printable full
    text is available. Pagination is followed by clicking the link labelled
    with the next page number until the pager disappears, the page number
    stops advancing, or a case older than B.E. 2556 appears past page 100.
    """
    name = 'tha_supercourt'
    allowed_domains = ['deka.supremecourt.or.th']
    # Landing page of the Thai Supreme Court case-law search.
    start_urls = ['http://deka.supremecourt.or.th/']
    id = 0
    # Number of the result page currently displayed. Kept as a *string*
    # because it is compared against the raw text scraped from the pager.
    Current_page = '0'

    def __init__(self, **kwargs):
        super(ThaSup, self).__init__(**kwargs)
        # Browser setup.
        options = webdriver.ChromeOptions()
        # Force a Chinese locale.
        options.add_argument('lang=zh_CN.UTF-8')
        # Spoof a desktop Chrome user agent.
        options.add_argument(
            'user-agent="Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36 "'
        )
        # options.add_argument('--no-sandbox')
        # options.add_argument('--disable-dev-shm-usage')
        # options.add_argument('--headless')
        # NOTE(review): `chrome_options=` and the find_element_by_xpath
        # calls below are Selenium 3 APIs (removed in Selenium 4); kept
        # as-is to match whatever Selenium version this project pins.
        self.driver = webdriver.Chrome(chrome_options=options)  # local Chrome

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Hook spider_closed so the browser is always shut down."""
        spider = super(ThaSup, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.closeSpider, signals.spider_closed)
        return spider

    def closeSpider(self):
        """Signal handler: quit the Selenium browser when the spider closes."""
        self.driver.quit()

    def parse(self, response):
        """Run the search and yield one item per case on every result page.

        The browser is the single source of truth: each click re-reads
        ``driver.page_source`` and wraps it in an ``HtmlResponse`` so the
        rest of the extraction can use ordinary XPath.
        """
        stop_paging = False  # set once a case older than B.E. 2556 is seen

        def click_and_reload(xpath, wait):
            """Click *xpath* in the live browser, wait *wait* seconds, and
            wrap the refreshed page source in an HtmlResponse."""
            self.driver.find_element_by_xpath(xpath).click()
            time.sleep(wait)
            # NOTE(review): `request=` normally takes a Request object, not
            # a URL string; kept as-is since nothing downstream reads it.
            return HtmlResponse(url=response.url, encoding='utf8',
                                body=self.driver.page_source,
                                request=response.url)

        def open_detail(xpath, wait):
            """Click a row's print button (opens the case in a new window),
            return ``(detail_url, detail_html)``, then close the extra
            window and switch back to the results window."""
            self.driver.find_element_by_xpath(xpath).click()
            time.sleep(wait)
            handles = self.driver.window_handles
            try:
                self.driver.switch_to.window(handles[1])
            except Exception as e:
                # No second window appeared; keep scraping the current one.
                print(str(e))
            now_url = self.driver.current_url
            res = HtmlResponse(url=now_url, encoding='utf8',
                               body=self.driver.page_source,
                               request=response.url)
            pages = res.xpath('//*[@id="print-layer"]/page')
            detail = pages.get().strip() if len(pages) else ''
            self.driver.close()
            self.driver.switch_to.window(handles[0])
            return now_url, detail

        def scrape_title(tr):
            """Join the title text nodes of a result row and strip the
            leading list number."""
            parts = tr.xpath('./ul/li[@class="item_deka_no content-title"]/label//text()')
            title = ''
            if len(parts):
                for node in parts:
                    title = title + ' ' + node.get().strip()
                title = re.sub(r'^ *', '', title)  # leading spaces
                # NOTE(review): the `.` is unescaped, so this strips the
                # row number plus ONE arbitrary following character
                # (normally the dot) — presumably intended; kept as-is.
                title = re.sub(r'^[1-9][0-9]*.', '', title)
                title = re.sub(r'^ *', '', title)
            return title

        def scrape_people(tr):
            """Join the litigant text nodes of a result row."""
            parts = tr.xpath(
                './ul/li[@class="content-detail-option"]/ul/li[@class="item_litigant content-option"]//li//text()')
            people = ''
            if len(parts):
                for node in parts:
                    people = people + ' ' + node.get().strip()
                people = re.sub(r'^ *', '', people)  # leading space
            return people

        def build_item(title, people, detail, detail_url):
            """Populate a MyFileItem with the fixed site metadata plus the
            scraped title, parties and full text."""
            digest = hashlib.md5(detail_url.encode('utf-8')).hexdigest()
            # Download file name ('f' + md5 of the detail URL) and unique ID.
            fina = 'f' + digest if len(detail_url) > 0 else ''
            item = MyFileItem()
            item['file_urls'] = ''
            item['country'] = 'Thailand'
            item['website'] = 'Supermecourt'  # (sic) kept for downstream consumers
            item['modular'] = 'Case'
            item['ext'] = 'pdf'  # download file format
            item['fina'] = fina
            item['title'] = ''
            item['abstractUrl'] = ''
            item['abstract'] = ''
            item['dabstractUrl'] = ''
            item['detail'] = detail
            item['detailUrl'] = ''
            item['downloadUrl'] = ''

            item['Title'] = title
            item['CaseNumber'] = ''
            item['KeyWord'] = ''
            item['SortA'] = 'LAWCOUNTRYTG'
            item['People'] = people
            item['CaseOfAction'] = ''
            item['UseLaw'] = ''
            item['AdjudicationDate'] = ''
            item['FullText'] = ''
            item['JudgAgency'] = ''
            item['SortB'] = 'LANGUAGETY'
            item['SortC'] = ''
            item['CaseSummary'] = ''
            item['Articles'] = ''
            item['Chapter'] = ''
            item['Section'] = ''
            item['SYS_FLD_DIGITFILENAME'] = fina
            item['FileUrl'] = ''
            item['AbstractFileName'] = ''
            item['DownLoadUrl'] = detail_url
            item['DownLoadWebNameC'] = '泰国最高法院'
            item['DownLoadWebNameE'] = "SupremeCourt Of Thailand"
            item['SYSID'] = digest
            item['Website'] = 'Superme Court'
            item['Isconversion'] = '1'
            item['CaseDate'] = ''
            return item

        def row_items(res, check_year):
            """Yield an item for every result row of *res*.

            With *check_year* true (paginated pages only — the first page
            is scraped without it, matching the original flow), stop and
            flag the outer loop once a case dated before B.E. 2556 shows
            up past page 100.
            """
            nonlocal stop_paging
            for tr in res.xpath('//*[@id="deka_result_info"]//li[@class="clear result"]'):
                title = scrape_title(tr)
                if check_year:
                    years = re.findall(r'[0-9]{4}$', title, re.S)
                    # BUG FIX: Current_page is a string; the original
                    # compared it to the int 100 directly, which raises
                    # TypeError on Python 3.
                    if years and int(years[0]) < 2556 and int(self.Current_page) > 100:
                        stop_paging = True
                        return
                people = scrape_people(tr)
                btn_ids = tr.xpath(
                    './ul/li[@class="item_deka_no content-title"]/ul/li[@class="last-child"]/button/@id')
                btn_id = btn_ids.get().strip() if len(btn_ids) else ''
                try:
                    detail_url, detail = open_detail('//*[@id="%s"]' % btn_id, 10)
                    detail_url = detail_url + '/' + title
                    if len(detail):
                        yield build_item(title, people, detail, detail_url)
                except Exception as e:
                    # Was a bare `except: pass`; log so broken rows are visible.
                    self.logger.warning('failed to fetch case detail: %s', e)

        # Restrict the search to cases from Buddhist year 2556 (2013 CE) on.
        self.driver.find_element_by_xpath('//*[@id="search_deka_start_year"]').send_keys("2556")
        page = click_and_reload('//*[@id="submit_search_deka"]', 10)

        # First result page.
        for item in row_items(page, check_year=False):
            yield item

        # Pagination: follow the pager link labelled current-page + 1.
        pager = page.xpath('//*[@id="pagination"]/ul/li')
        while len(pager):
            current = page.xpath('//*[@id="pagination"]/ul/li[@class="current"]/a/text()')
            if len(current):
                now = current.get().strip()
                if now == self.Current_page:
                    break  # the click did not advance the page; stop
                self.Current_page = now
            else:
                # ROBUSTNESS FIX: the original fell through to int('')
                # and crashed when the "current" marker was missing.
                break
            page_no = int(self.Current_page)
            for i in range(len(pager)):
                labels = pager[i].xpath('./a/text()')
                if len(labels):
                    try:
                        if int(labels.get().strip()) == page_no + 1:
                            page = click_and_reload(
                                '//*[@id="pagination"]/ul/li[%s]/a' % str(i + 1), 10)
                            break
                    except Exception as e:
                        # Non-numeric label ("next"/"last") or a failed
                        # click: give up on this pager, as before.
                        print("获取下一页失败:" + str(e))
                        break

            for item in row_items(page, check_year=True):
                yield item
            if not stop_paging:
                pager = page.xpath('//*[@id="pagination"]/ul/li')
            else:
                break  # reached cases older than B.E. 2556; stop crawling
