# coding:utf-8
import datetime
import json
import threading
import time

import scrapy
from bs4 import BeautifulSoup
from scrapy import Request

from BashouScrapy.wenshu.common import init_cookies_for_content

from ..items import WenshuContentItem
from ..pipelines import get_wfuu_db

# Per-thread scratch storage; not referenced anywhere in this chunk —
# presumably used by code elsewhere in the package (or dead). TODO confirm.
thread_local_data = threading.local()

class ContentHeader(object):
    """Shared request configuration for document-content fetches.

    Holds the content endpoint URL prefix (a document ID is appended per
    request) and the default HTTP headers. The header dict is mutated in
    place by ``init_cookies_for_content`` before each request, so all
    requests share one header object by design.
    """

    # Document content endpoint; append the document ID to build a full URL.
    url = 'http://wenshu.court.gov.cn/CreateContentJS/CreateContentJS.aspx?DocID='

    header = {
        'Host': 'wenshu.court.gov.cn',
        # Fixed: the original value 'http: // wenshu.court.gov.cn' contained
        # stray spaces, producing a malformed Origin header.
        'Origin': 'http://wenshu.court.gov.cn',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36',
        'Accept': '*/*',
        'Accept-Language': 'zh,en-US;q=0.7,en;q=0.3',
        'Accept-Encoding': 'gzip, deflate',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }


class WenshuContentScrapy(scrapy.Spider):
    """Fetch the full judgment text for documents already discovered by the
    listing spider and stored in the ``crawl_data`` Mongo collection.

    Documents with ``hasDoc == False`` are processed in batches of 1000;
    a downstream pipeline is expected to flip ``hasDoc`` once the content
    has been stored.
    """

    name = "wenshu_content"
    custom_settings = {
        # NOTE(review): dotted path looks inconsistent with the relative
        # import style used in this module (`..pipelines`) — verify it
        # resolves against this project's module layout.
        'ITEM_PIPELINES': {'pipelines.WenshuContentPipeline': 300}
    }
    # Connection/collection are created once, at class-definition time.
    crawl_db = get_wfuu_db()
    crawl_tbl = crawl_db["crawl_data"]

    def start_requests(self):
        """Yield one content request per pending document until none remain.

        NOTE: the loop terminates only if something downstream marks the
        fetched documents ``hasDoc: True``; otherwise the same batch is
        re-queried forever.
        """
        while True:
            # Materialize the batch so emptiness can be tested without the
            # deprecated Cursor.count().  Also fixed: pymongo's sort()
            # takes a key name and direction (or a list of pairs) — the
            # original passed a dict, which raises TypeError.
            docs = list(
                self.crawl_tbl.find({'hasDoc': False})
                .sort('_id', 1)
                .limit(1000)
            )
            if not docs:
                break
            for doc in docs:
                try:
                    # Refresh anti-crawl cookies before each request.
                    # Best-effort: a failure should not abort the batch.
                    init_cookies_for_content(ContentHeader.header)
                except Exception as error:
                    self.logger.warning("cookie init failed: %s", error)
                yield Request(
                    ContentHeader.url + str(doc[u"文书ID"]),
                    meta={'_id': doc["_id"], 'PubDate': doc["PubDate"]},
                    headers=ContentHeader.header,
                )

    def parse(self, response):
        """Extract the document body from the response and emit an item."""
        item = WenshuContentItem()
        # Fixed: response.body is bytes and cannot be indexed with a string
        # key.  Decode the payload first — assumes the endpoint returns a
        # JSON object with an "Html" key, as the original mapping access
        # implies; TODO confirm against the live response format.
        result = json.loads(response.text)
        text = result["Html"]
        if text == "":
            text = u"文书内容为空"
        pub_date = response.meta["PubDate"]
        item["DocContent"] = text
        item['_id'] = response.meta["_id"]
        if pub_date != '':
            item['PubDate'] = pub_date
        yield item

