# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import FormRequest, Request
from datetime import datetime, timedelta
from panjuewenshu.items import Wenshu
from panjuewenshu.normalizor import CleanHTMLNormalizor
import json

class JiaotongzhaoshiSpider(scrapy.Spider):
    """Crawl wenshu.court.gov.cn's listing API for criminal traffic-accident
    (交通肇事) judgements, one ~30-day date window at a time.

    Page 1 of each window is fetched first to learn the total count; the
    remaining pages are then scheduled. Results are only logged here, not
    yielded as items.
    """

    name = "jiaotongzhaoshi"

    headers = {
        'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
    }

    list_content_api = 'http://wenshu.court.gov.cn/List/ListContent'

    # Template form data; Param/Index carry a '%s' slot filled per request.
    formdata = {
        'Param' : u'案件类型:刑事案件,三级案由:交通肇事,裁判日期:%s',
        'Index' : '%s', # page number, 1-based
        'Page' : '20',  # results per page
        'Order' : u'法院层级',
        'Direction' : 'asc'
    }

    def gen_time_area(self):
        """Yield ~30-day windows from 2012-01-01 until now, formatted as
        "2012-01-01 TO 2012-01-31" for the 裁判日期 (judgement date) filter.

        NOTE(review): consecutive windows share their boundary day, so a
        judgement dated exactly on a boundary may be fetched twice if the
        API treats the range as inclusive — confirm against the API.
        """
        start = datetime.strptime('2012-01-01', '%Y-%m-%d')
        to = start
        end = datetime.now()
        while to < end:
            start = to
            to = start + timedelta(days=30)
            yield '%s TO %s' % (start.strftime("%Y-%m-%d"), to.strftime("%Y-%m-%d"))

    def start_requests(self):
        """Issue the page-1 request for every date window."""
        for date in self.gen_time_area():
            meta = {'date' : date}
            formdata = self.formdata.copy()
            formdata['Param'] = formdata['Param'] % date
            formdata['Index'] = formdata['Index'] % '1'
            yield FormRequest(self.list_content_api, headers=self.headers, meta=meta, formdata=formdata, callback=self.parse_first_page)

    def __extract_count(self, data):
        """Return the total result count from the decoded list payload, or 0
        if no element carries a 'Count' key."""
        for item in data:
            if 'Count' in item:
                return int(item.get('Count'))
        return 0

    def parse_first_page(self, response):
        """Parse page 1 of a window and schedule pages 2..N.

        The API returns a JSON string embedded in a JSON string, hence the
        double json.loads.
        """
        meta = response.meta
        try:
            data = json.loads(response.body)
            data = json.loads(data)
            # total number of results for this window
            count = self.__extract_count(data)
            self.logger.info('got result, date : %s, page : 1, count : %s, content : %s' % (meta.get('date'), count, json.dumps(data, ensure_ascii=False).encode('utf-8')))
        except Exception as e:  # fix: 'except Exception, e' is Python-2-only syntax
            self.logger.exception('exception : %s' % e)
        else:
            # ceil(count / 20) pages in total, capped at 100 by the site.
            # The previous 'count/20+1' requested one empty extra page
            # whenever count was an exact multiple of 20; '//' also keeps
            # this integer division under Python 3.
            pages = min(100, -(-count // 20))
            for page in range(2, pages+1):
                formdata = self.formdata.copy()
                formdata['Param'] = formdata['Param'] % meta.get('date')
                formdata['Index'] = formdata['Index'] % str(page)
                new_meta = meta.copy()
                new_meta['page'] = page
                yield FormRequest(self.list_content_api, headers=self.headers, meta=new_meta, formdata=formdata, callback=self.parse_pages)

    def parse_pages(self, response):
        """Log pages 2..N of a window (single json.loads here: only page 1
        was observed to need the double decode)."""
        meta = response.meta
        try:
            data = json.loads(response.body)
            self.logger.info('got result, date : %s, page : %s, content : %s' % (meta.get('date'), meta.get('page'), json.dumps(data, ensure_ascii=False).encode('utf-8')))
        except Exception as e:
            self.logger.exception('exception : %s' % e)

class WenshuContentSpider(scrapy.Spider):
    """Fetch the full judgement HTML for every Wenshu row whose content is
    still NULL, normalize it, and save it back to the database."""

    name = 'wenshu_content'

    api = 'http://wenshu.court.gov.cn/CreateContentJS/CreateContentJS.aspx?DocID=%s'

    headers = {
        'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
    }

    def __init__(self, *args, **kwargs):
        # Fix: call the base Spider initializer (it was skipped before) and
        # accept the standard spider args/kwargs so crawler construction
        # stays compatible.
        super(WenshuContentSpider, self).__init__(*args, **kwargs)
        self.clean_html_normalizor = CleanHTMLNormalizor()

    def __load_tasks(self):
        """Yield Wenshu rows whose 'content' column is still NULL."""
        for item in Wenshu.select().where(Wenshu.content.is_null(True)):
            yield item

    def start_requests(self):
        """Request the content endpoint for each pending row; the DB row is
        carried in request meta so the callback can update it."""
        for item in self.__load_tasks():
            url = self.api % item.docid
            meta = {'item' : item}
            headers = self.headers.copy()
            # Referer mimics the document's browser page — presumably
            # required by the site; verify before removing.
            headers['Referer'] = 'http://wenshu.court.gov.cn/content/content?DocID=%s' % item.docid
            yield Request(url, headers=headers, meta=meta, callback=self.parse_content)

    def __extract_content(self, response):
        """Extract the 'Html' field from the 'var jsonHtmlData = ...;' line
        of the response JS, or None when the line is absent.

        The assigned value is a JSON-encoded JSON string, hence the double
        json.loads.
        """
        target = 'var jsonHtmlData = '
        for line in response.body.split('\n'):
            if target in line:
                start = line.find(target) + len(target)
                end = line.rfind(';')
                line = line[start:end]
                return json.loads(json.loads(line)).get('Html')

    def parse_content(self, response):
        """Store the cleaned judgement HTML on the row fetched for this
        request; warn and skip when no content could be extracted."""
        item = response.meta.get('item')
        real_content = self.__extract_content(response)
        if not real_content:
            # Fix: 'warnning' is not a Logger method — the typo raised
            # AttributeError on the failure path instead of logging.
            self.logger.warning('failed to get content : %s' % item.docid)
            return

        item.content = self.clean_html_normalizor.normalize(real_content)
        item.save()

