# coding:utf8

import scrapy
import time
import traceback
from scrapy import Request
from selenium import webdriver
from ..items import IndictmentContentItem
from bs4 import BeautifulSoup
from BashouScrapy.spiders import indictment_list
from scrapy.spidermiddlewares.httperror import HttpError
from ..pipelines import get_yindan_test
import datetime


class IndictmentContentSpider(scrapy.Spider):
    """Spider that downloads the detail page for every indictment record
    still marked ``hasDoc: False`` in Mongo and yields the raw HTML.

    Works off the ``indictment`` collection populated by the list spider;
    session cookies are obtained via Selenium and refreshed periodically.
    """

    name = "indictment_content"
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        # 'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Host': 'www.ajxxgk.jcy.gov.cn',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
    }
    custom_settings = {
        # NOTE(review): Scrapy resolves pipeline keys as full dotted import
        # paths; 'pipelines.IndictmentContentPipeline' only works if the
        # project root is on sys.path. Verify this matches the project layout
        # (the module is imported above as ..pipelines).
        'ITEM_PIPELINES': {'pipelines.IndictmentContentPipeline': 299}
    }
    # Mongo handle created at class-definition time (project helper).
    crawl_db = get_yindan_test()
    indictment = crawl_db["indictment"]

    def start_requests(self):
        """Pull batches of up to 1000 undownloaded records and request each
        detail page, refreshing the Selenium cookie every five minutes.

        Yields:
            scrapy.Request for each record's ``caseHref``, carrying the Mongo
            ``_id`` in meta so ``parse`` can write the result back.
        """
        start_time = datetime.datetime.now()
        while True:
            try:
                docs = self.indictment.find({'hasDoc': False}).sort("_id", 1).limit(1000)
                self.headers['Cookie'] = indictment_list.get_cookie_by_selenium()
                # Stop once no record needs downloading any more.
                if docs.count(with_limit_and_skip=True) == 0:
                    break
                for doc in docs:
                    now = datetime.datetime.now()
                    # The session cookie goes stale; grab a fresh one via
                    # Selenium once more than five minutes have elapsed.
                    if now > start_time + datetime.timedelta(minutes=5):
                        start_time = now
                        self.headers["Cookie"] = indictment_list.get_cookie_by_selenium()
                    yield Request(
                        doc['caseHref'],
                        headers=self.headers,
                        dont_filter=True,
                        meta={'_id': doc['_id']},
                        callback=self.parse,
                        errback=self.handle_error_back,
                    )
                    self.logger.info(
                        '上传日期：' + str(doc['uploadDate']) + ',docId是' + str(doc['_id']) + '的文书开始更新')
            except Exception:
                # Bug fix: the original `except Exception: continue` silently
                # swallowed every error and retried immediately, hiding
                # failures and risking a tight busy-loop when Mongo or
                # Selenium is down. Log the traceback and back off briefly.
                self.logger.error('start_requests batch failed:\n%s', traceback.format_exc())
                time.sleep(5)
                continue

    def parse(self, response):
        """Classify the fetched page and yield its raw HTML.

        The breadcrumb div (class ``crumbs``) containing '重要案件信息'
        distinguishes "important case" pages from ordinary public documents.
        """
        item = IndictmentContentItem()
        soup = BeautifulSoup(response.body, 'html5lib')
        crumbs = soup.find(class_='crumbs')
        if crumbs is not None and '重要案件信息' in crumbs.get_text():
            item['type'] = '重要案件'
        else:
            item['type'] = '公开文书'
        # Bug fix: extract_first() returns None on an empty selection instead
        # of the IndexError that extract()[0] raised (e.g. non-HTML body).
        item['rawData'] = response.xpath('/html').extract_first()
        item['_id'] = response.meta['_id']
        yield item

    def handle_error_back(self, failure):
        """Errback: delete records whose detail page returns HTTP 404.

        Bug fix: only an HttpError failure carries ``.response`` — the
        original dereferenced it unconditionally, so DNS/timeout failures
        raised AttributeError inside the errback. Non-HTTP failures are now
        just logged.
        """
        if failure.check(HttpError):
            if failure.value.response.status == 404:
                self.indictment.delete_many({'caseHref': failure.request.url})
        else:
            self.logger.error('Request failed (non-HTTP error): %s', failure.request.url)




