import hashlib

import scrapy
from ..items import MyFileItem
import re


class Ssosg(scrapy.Spider):
    """Spider for District Court of Singapore judgments hosted on CommonLII.

    Crawl path: start page -> per-year index pages (``modelparse``)
    -> individual judgment pages (``detailed``), each yielding one
    populated ``MyFileItem``.
    """

    name = 'sso_com_org'
    allowed_domains = ['commonlii.org']
    # Index of Singapore District Court (SGDC) judgments.
    start_urls = ['http://www.commonlii.org/sg/cases/SGDC/']
    id = 0

    def parse(self, response):
        """Follow every year-index link listed inside the <blockquote> blocks."""
        for link in response.xpath('//blockquote/a'):
            # Skip anchors without an href attribute.
            if link.xpath('./@href'):
                yield response.follow(link, callback=self.modelparse,
                                      dont_filter=True)

    def modelparse(self, response):
        """Extract each case link + title from a year index page and follow it."""
        for entry in response.xpath('//body/ul/li'):
            href = entry.xpath('./a/@href').get()
            if not href:
                continue
            anchor = entry.xpath('./a')[0]
            # Resolve the (possibly relative) href against the current page.
            # The previous 'https://' + domain + href concatenation produced
            # malformed URLs for hrefs that do not start with '/'.
            detail_url = response.urljoin(href.strip())
            # Case title: all text nodes of the <li>, space-joined, with any
            # leading spaces removed (same result as the old concat + re.sub).
            texts = entry.xpath('.//text()').getall()
            title = ' '.join(t.strip() for t in texts).lstrip(' ')
            yield response.follow(anchor, callback=self.detailed, dont_filter=True,
                                  meta={'detailUrl': detail_url, 'title': title})

    # Parse a judgment detail page.
    def detailed(self, response):
        """Yield one ``MyFileItem`` for a judgment page.

        The judgment body is captured as raw HTML between the site's
        ``<!--sino date`` and ``<!--sino noindex-->`` marker comments;
        pages without that span yield nothing.
        """
        if response.status != 200:
            return

        # Country / source metadata.
        title = response.meta['title']
        detail_url = response.url
        download_url = detail_url

        # Extract the judgment body (raw HTML) between the marker comments.
        matches = re.findall(r'''<!--sino date[\s\S\n\r]*<!--sino noindex-->''',
                             response.text)
        if not matches:
            return
        detail = matches[0]

        # Stable identifiers derived once from the page URL.
        digest = hashlib.md5(detail_url.encode('utf-8')).hexdigest()
        fina = 'f' + digest  # download file name
        systemid = digest    # unique record ID

        item = MyFileItem()
        item['file_urls'] = ''
        item['country'] = 'Singapore'
        item['website'] = 'comorg'
        item['modular'] = 'case'
        item['ext'] = 'pdf'
        item['fina'] = fina
        item['title'] = title
        item['abstractUrl'] = ''
        item['abstract'] = ''
        item['detail'] = detail
        item['detailUrl'] = ''
        item['downloadUrl'] = ''

        item['Title'] = title
        item['CaseNumber'] = ''
        item['KeyWord'] = ''
        item['SortA'] = 'LAWCOUNTRYXJP'
        item['People'] = ''
        item['CaseOfAction'] = ''
        item['UseLaw'] = ''
        item['AdjudicationDate'] = ''
        item['FullText'] = ''
        item['JudgAgency'] = ''
        item['SortB'] = 'LANGUAGEYY'
        item['SortC'] = ''
        item['CaseSummary'] = ''
        item['Articles'] = ''
        item['Chapter'] = ''
        item['Section'] = ''
        item['SYS_FLD_DIGITFILENAME'] = fina
        item['FileUrl'] = ''
        item['AbstractFileName'] = ''
        item['DownLoadUrl'] = download_url
        item['DownLoadWebNameC'] = '新加坡地方法院'
        item['DownLoadWebNameE'] = "District Court of Singapore"
        item['SYSID'] = systemid
        item['Website'] = 'District Court of Singapore'
        item['Isconversion'] = '1'
        item['CaseDate'] = ''

        yield item
