# -*- coding: utf-8 -*-

import scrapy
import re
import json
from judgement.items import JudgementItem
from judgement.utils.endecode import encode_utf8
from datetime import datetime, timedelta


class JudgementSpider(scrapy.Spider):
    """Spider for China Judgements Online (wenshu.court.gov.cn).

    Crawls criminal traffic-accident judgements in 5-day date windows
    starting 2016-01-01: for each window it reads the total hit count,
    walks every 20-row result page, builds a ``JudgementItem`` per row,
    and finally fetches the full judgement text for each document.
    """

    name = "caipanwenshu"
    allowed_domains = ["wenshu.court.gov.cn"]
    list_url = 'http://wenshu.court.gov.cn/List/ListContent'

    # The list endpoint only answers AJAX-style form POSTs with this header set.
    headers = {
        'X-Requested-With': 'XMLHttpRequest',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Connection': 'keep-alive',
        'Host': 'wenshu.court.gov.cn',
    }

    custom_settings = {
        'ITEM_PIPELINES': {
            'judgement.pipelines.JudgementPipeline': 100,
        }
    }

    # Must stay in sync with the 'Page' form field below (rows per page).
    PAGE_SIZE = 20

    # Template form; 'Param' and 'Index' carry '%s' placeholders filled
    # per request by __build_formdata().
    formdata = {
        'Param': u'案由:交通肇事,案件类型:刑事案件,裁判日期:%s',
        'Page': '20',
        'Order': u'裁判日期',
        'Index': '%s',
        'Direction': 'asc',
    }

    # Pattern locating the judgement text embedded in the content JS response.
    # Compiled once here instead of per response.
    _CONTENT_RE = re.compile(r'jsonHtmlData = "(.*?)";')

    def __gen_timestamp(self):
        """Yield successive 5-day '<start> TO <end>' windows from 2016-01-01 to now.

        NOTE(review): consecutive windows share a boundary day, so a
        judgement dated exactly on a boundary may be fetched twice —
        dedupe downstream if that matters.
        """
        start = datetime.strptime('2016-01-01', '%Y-%m-%d')
        end = start
        now = datetime.now()
        while end < now:
            start = end
            end = start + timedelta(days=5)
            yield '%s TO %s' % (start.strftime('%Y-%m-%d'), end.strftime('%Y-%m-%d'))

    def __extract_count(self, data):
        """Return the total result count from a decoded list response, or 0."""
        for entry in data:
            if 'Count' in entry:
                return int(entry.get('Count'))
        return 0

    def __build_formdata(self, date, index):
        """Build the POST form for one (date-window, page-index) request."""
        formdata = self.formdata.copy()
        formdata['Param'] = formdata['Param'] % date
        formdata['Index'] = formdata['Index'] % str(index)
        return formdata

    def start_requests(self):
        """Issue the first list request (page 1) of every date window."""
        for date in self.__gen_timestamp():
            yield scrapy.FormRequest(self.list_url,
                                     formdata=self.__build_formdata(date, 1),
                                     headers=self.headers,
                                     dont_filter=True,
                                     callback=self.parse,
                                     meta={'date': date})

    def parse(self, response):
        """Read the total hit count for the window, then request every result page.

        The endpoint returns a JSON string that itself encodes JSON, hence
        the double ``json.loads``.
        """
        meta = response.meta
        try:
            data = json.loads(json.loads(response.body.decode('utf-8')))
            count = self.__extract_count(data)
        except Exception as e:
            self.logger.exception('parse exception: %s' % e)
        else:
            # Ceiling division: a partially-filled last page still counts.
            pages = (count + self.PAGE_SIZE - 1) // self.PAGE_SIZE
            self.logger.info('count=%s, pages=%s' % (count, pages))
            # BUG FIX: the original iterated range(2, count / 20), which
            # dropped the items on page 1 (only its count was used) and
            # skipped the final page(s). Request every page, 1..pages.
            for num in range(1, pages + 1):
                new_meta = meta.copy()
                new_meta['num'] = num
                self.logger.debug('parse: num=%s' % num)
                yield scrapy.FormRequest(self.list_url,
                                         formdata=self.__build_formdata(meta.get('date'), num),
                                         headers=self.headers,
                                         dont_filter=True,
                                         callback=self.parse_info,
                                         meta=new_meta)

    def parse_info(self, response):
        """Parse one list page: one JudgementItem per row, then request its body."""
        meta = response.meta
        self.logger.info("parse_info: [date=%s] [page=%s]" % (meta.get('date'), meta.get('num')))
        try:
            rows = json.loads(json.loads(response.body.decode('utf-8')))
        except Exception as e:
            self.logger.exception('parse exception: %s [page=%s]' % (e, meta.get('num')))
        else:
            # rows[0] is the count record; the actual results follow it.
            for info in rows[1:]:
                item = JudgementItem()
                # Not every judgement carries a gist paragraph; default to '无'.
                item['gist'] = encode_utf8(info.get(u'裁判要旨段原文', u'无'))
                item['type'] = encode_utf8(info[u'案件类型'])
                item['date'] = encode_utf8(info[u'裁判日期'])
                item['name'] = encode_utf8(info[u'案件名称'])
                doc_id = info[u'文书ID']
                item['docid'] = encode_utf8(doc_id)
                item['produce'] = encode_utf8(info[u'审判程序'])
                item['num'] = encode_utf8(info[u'案号'])
                item['court'] = encode_utf8(info[u'法院名称'])
                item['url'] = encode_utf8('http://wenshu.court.gov.cn/content/content?DocID=%s' % doc_id)
                url_content = 'http://wenshu.court.gov.cn/CreateContentJS/CreateContentJS.aspx?DocID=%s' % doc_id
                self.logger.debug('parse_info: send http request [docid=%s]' % doc_id)
                yield scrapy.Request(url_content,
                                     callback=self.parse_content,
                                     meta={'item': item},
                                     dont_filter=True)

    def parse_content(self, response):
        """Extract the judgement text from the content JS; on failure return
        the item without content so the pipeline still records the metadata."""
        item = response.meta['item']
        try:
            body = response.body.decode('utf-8')
        except UnicodeDecodeError:
            body = ''
        match = self._CONTENT_RE.search(body)
        if match is None:
            url_content = 'http://wenshu.court.gov.cn/CreateContentJS/CreateContentJS.aspx?DocID=%s' % item['docid']
            # BUG FIX: the original logged the literal text
            # 'parse_content error [url: %s] % url_content' — the '%' operator
            # sat inside the string, so the URL was never interpolated.
            self.logger.info('parse_content error [url: %s]' % url_content)
            return item
        item['content'] = encode_utf8(match.group(1))
        return item


