# -*- coding: utf-8 -*-

import math
import threading
import time
import urllib
import urllib.parse  # explicit: `import urllib` alone does not guarantee the `parse` submodule is loaded

import scrapy

# from bs4 import BeautifulSoup
# from scrapy.spidermiddlewares.httperror import HttpError
from BashouScrapy.wenshu import common
from BashouScrapy.wenshu.captcha import parse_captcha

from ..items import WenshuListItem
from ..pipelines import get_wfuu_db



thread_local_data = threading.local()

class ListContentHeader(object):
    """Constants for POSTing to the wenshu.court.gov.cn ListContent endpoint.

    Pure namespace class: the target URL, the request headers that mimic a
    desktop Chrome browser, and a ``cookie`` slot (unset here; presumably
    filled in elsewhere at runtime — verify against callers).
    """
    # NOTE: the previous explicit no-op ``__init__`` was removed; the implicit
    # default constructor is equivalent, so instantiation still works.

    url = 'http://wenshu.court.gov.cn/List/ListContent'

    header = {
        'Host': 'wenshu.court.gov.cn',
        'Origin': 'http://wenshu.court.gov.cn',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        # Referer is a pre-encoded search URL for criminal cases (刑事案件).
        'Referer': 'http://wenshu.court.gov.cn/List/List?sorttype=1&conditions=searchWord+1++%E5%88%91%E4%BA%8B%E6%A1%88%E4%BB%B6+%E6%A1%88%E4%BB%B6%E7%B1%BB%E5%9E%8B:%E5%88%91%E4%BA%8B%E6%A1%88%E4%BB%B6',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache'
        # 'Cookie': None
    }

    # Placeholder for a session cookie; None until set by external code.
    cookie = None

class WenshuListSpider(scrapy.Spider):
    """Spider that pages through wenshu.court.gov.cn ListContent results.

    Crawl targets (a pre-encoded search ``PARAM``, expected result ``COUNT``,
    publication date, and resume index) come from the MongoDB-style collection
    ``crawl_tbl_17``; one POST per 20-result page is issued to the ListContent
    endpoint defined on :class:`ListContentHeader`.
    """

    name = "wenshu_list"
    custom_settings = {
        'ITEM_PIPELINES': {'pipelines.WenshuListPipeline': 298}
        # 'DOWNLOADER_MIDDLEWARES': {'middlewares.WenshuListDownloaderMiddleware':543}
        # 'DOWNLOAD_DELAY': 2,
        # 'DOWNLOAD_TIMEOUT': 120,  # (default is 180s)
        # 'RANDOMIZE_DOWNLOAD_DELAY': True  # (default True: wait 0.5-1.5*DOWNLOAD_DELAY to avoid bans)
        # DOWNLOADER_MIDDLEWARES:
    }
    # handle_httpstatus_list = [301, 302, 204, 206, 403, 404, 500, 501, 502, 503, 505]

    # NOTE(review): these run at class-definition (import) time, so merely
    # importing this module opens a database connection.
    crawl_db = get_wfuu_db()
    crawl_tbl = crawl_db["crawl_tbl_17"]

    def start_requests(self):
        """Yield one ListContent POST per result page for each crawl target.

        Pages are 20 records each. ``CURRENT_INDEX`` (when present) is the
        last page already fetched, so crawling resumes from the next page.
        """
        while True:
            try:
                query_param = {"$query": {"IS_CRAWL": 0, "COUNT": {"$gt": 0, "$lte": 2000}},"$orderby": {"_id": 1}}
                # TODO(review): hard-coded debug target left in place of the DB
                # lookup below. With this stub, target_info is never None, so the
                # surrounding while-loop never breaks and the same pages are
                # re-yielded forever (dont_filter=True disables dedup). Restore
                # find_one() for production.
                # target_info = self.crawl_tbl.find_one(query_param)
                target_info = {u'COUNT': 1277, u'CURRENT_INDEX': u'2', u'IS_CRAWL': 0, u'CN_PARAM': u'\u4e0a\u4f20\u65e5\u671f:2017-12-13+TO+2017-12-13,\u6848\u4ef6\u7c7b\u578b:\u6c11\u4e8b\u6848\u4ef6,\u88c1\u5224\u65e5\u671f:2017-11-08+TO+2017-11-08', u'PARAM': u'%E4%B8%8A%E4%BC%A0%E6%97%A5%E6%9C%9F%3A2017-12-13+TO+2017-12-13%2C%E6%A1%88%E4%BB%B6%E7%B1%BB%E5%9E%8B%3A%E6%B0%91%E4%BA%8B%E6%A1%88%E4%BB%B6%2C%E8%A3%81%E5%88%A4%E6%97%A5%E6%9C%9F%3A2017-11-08+TO+2017-11-08', u'FINAL_PARAM': True, u'HAS_ERROR': False, u'CRAWL_TIME': 0, u'PUB_DATE': u'2017-12-13'}
                if target_info is None:
                    break
                print(target_info["CN_PARAM"] + ":" + str(target_info["COUNT"]))
                count = target_info['COUNT']
                param = str(target_info['PARAM'])
                pub_date = str(target_info['PUB_DATE'])
                # ceil(count / 20) pages of 20 records each. The previous
                # floor(count/20) + 1 requested one empty extra page whenever
                # count was an exact multiple of 20.
                max_index = int(math.ceil(count / 20.0))
                if target_info.get('CURRENT_INDEX', -1) != -1:
                    min_index = int(target_info['CURRENT_INDEX']) + 1
                else:
                    min_index = 1
                for i in range(min_index, max_index + 1):
                    try:
                        formdata = {
                            'Param': param,
                            'Index': str(i),
                            'Page': '20',
                            # NOTE(review): pre-quoted here AND urlencoded again
                            # by FormRequest — confirm the endpoint expects the
                            # double encoding.
                            'Order': urllib.parse.quote('法院层级'),
                            'Direction': 'asc',
                            'PubDate': pub_date,
                        }
                        # formdata = get_complete_formdata(formdata)
                    except Exception as error:
                        print(error)
                        # Skip this page: without this, the yield below would
                        # reuse a stale formdata (or hit NameError on the first
                        # iteration).
                        continue
                    # formdata must be passed by keyword: the second positional
                    # argument of a scrapy Request is `callback`, so the old
                    # positional call handed the form dict to scrapy as the
                    # callback and never sent it as the POST body.
                    yield scrapy.FormRequest(ListContentHeader.url, formdata=formdata,
                                             headers=ListContentHeader.header, method="POST",
                                             cookies={}, dont_filter=True,
                                             errback=self.handle_error_back)

            except Exception as error:
                print(error)
                continue

    def parse(self, response):
        """Default callback for list pages; extraction is currently disabled.

        The commented-out code below (kept for reference) classified pages as
        重要案件 / 公开文书 and emitted a WenshuListItem.
        """
        item = WenshuListItem()
        page = response.body
        # soup = BeautifulSoup(page, 'html5lib')
        # div = soup.find(class_='crumbs')
        # if div is not None and '重要案件信息' in div.get_text():
        #     indictment_type = '重要案件'
        # else:
        #     indictment_type = '公开文书'
        # item['type'] = indictment_type
        # item['rawData'] = response.xpath('/html').extract()[0]
        # item['_id'] = response.meta['_id']
        # yield item

    def handle_error_back(self, failure):
        """Errback for failed list requests: print everything except HTTP 202.

        Non-HTTP failures (timeouts, DNS errors, connection refused) carry no
        ``.response`` attribute; the previous unconditional attribute access
        raised AttributeError inside the errback for those, so they are now
        treated as reportable failures too.
        """
        response = getattr(failure.value, 'response', None)
        if response is None or response.status != 202:
            print(failure)
