# -*- coding: utf-8 -*-

import scrapy
import traceback
from scrapy import Request
from selenium import webdriver
from ..items import IndictmentListItem
from bs4 import BeautifulSoup
from scrapy.spidermiddlewares.httperror import HttpError
import datetime


# Crawl window used by is_startday_to_endday(): pages are followed only while
# they still contain a date between 2 days ago and tomorrow.  NOTE(review):
# these are evaluated once at import time, so a long-running process keeps the
# window from when the module was first loaded.
start_day = datetime.datetime.now() - datetime.timedelta(2)
end_day = datetime.datetime.now() + datetime.timedelta(1)

def get_cookie_by_selenium():
    """Fetch session cookies for www.ajxxgk.jcy.gov.cn via a real Chrome browser.

    Returns:
        A cookie header string of the form ``"name=value; name=value;"``,
        or ``""`` when the driver could not be created or any step failed.
    """
    driver = None
    display = None
    try:
        display, driver = get_chrome_driver()
        if not driver:
            return ""
        driver.implicitly_wait(60)
        # Loading a real article page makes the site issue its session cookies.
        driver.get("http://www.ajxxgk.jcy.gov.cn/html/20170517/2/5503208.html")
        cookie_str = " ".join(
            "{0}={1};".format(c["name"], c["value"]) for c in driver.get_cookies()
        )
    except Exception as e:
        # BUG FIX: the original used a bare Python-2-style `print` followed by
        # an unattached string expression, so the error was never printed.
        print("lxw get_cookie_by_selenium Exception: {0}\n{1}\n{2}\n\n".format(
            e, traceback.format_exc(), "--" * 30))
        return ""
    else:
        return cookie_str
    finally:
        # Always release the browser (and virtual display, if any), even on failure.
        if driver:
            driver.quit()
        if display:
            display.stop()


def get_chrome_driver():
    """Create a Chrome webdriver with image loading disabled.

    Returns:
        A ``(display, driver)`` tuple; ``(None, None)`` on failure.
        ``display`` is only non-None when the (currently disabled) virtual
        display for headless Linux runs is enabled.
    """
    display = None
    driver = None
    try:
        # On Linux without a monitor a virtual Display would be started here;
        # keep these lines commented out on desktop platforms.
        # if SpiderConf.no_monitor:
        #     display = Display(visible=0, size=(800, 800))
        #     display.start()

        options = webdriver.ChromeOptions()
        # Skip image downloads to speed up page loads.
        prefs = {"profile.managed_default_content_settings.images": 2}
        options.add_experimental_option("prefs", prefs)
        options.add_argument("--no-sandbox")

        driver = webdriver.Chrome(executable_path=r"/Users/bashou/Documents/Dir/chromedriver",
                                  chrome_options=options)
        # Both timeouts must be set for the page-load timeout to take effect.
        driver.set_page_load_timeout(120)
        driver.set_script_timeout(120)
    except Exception as e:
        # BUG FIX: the original used a bare Python-2-style `print` followed by
        # an unattached string expression, so the error was never printed.
        print("lxw get_chrome_driver() Exception: {0}\n{1}\n{2}\n\n".format(
            e, traceback.format_exc(), "--" * 30))
        if display:
            display.stop()
        if driver:
            driver.quit()
        return None, None
    else:
        return display, driver

def date_range(start_day, end_day):
    """Yield one datetime per day from start_day (inclusive) to end_day (exclusive)."""
    total_days = int((end_day - start_day).days)
    offset = 0
    while offset < total_days:
        yield start_day + datetime.timedelta(offset)
        offset += 1

def get_date_list(start_day, end_day):
    """Return the days in [start_day, end_day) formatted as 'YYYY-MM-DD' strings."""
    # Comprehension instead of the original manual append loop.
    return [day.strftime('%Y-%m-%d') for day in date_range(start_day, end_day)]

def is_startday_to_endday(page, start_date=start_day, end_date=end_day):
    """Return True if *page* contains any date string within [start_date, end_date).

    Used to decide whether a result page still holds entries inside the crawl
    window.  NOTE(review): the defaults are bound to the module-level window
    computed at import time.
    """
    # `in` / any() instead of the original `find(...) != -1` loop.
    return any(date_str in page for date_str in get_date_list(start_date, end_date))

class IndictmentListSpider(scrapy.Spider):
    """List spider for indictment documents on www.ajxxgk.jcy.gov.cn.

    Crawls the search-result pages, yielding one IndictmentListItem per result
    row, and keeps paginating while the current page still contains a date
    inside the crawl window (see is_startday_to_endday).
    """
    name = "indictment_list"
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Host': 'www.ajxxgk.jcy.gov.cn',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'
    }
    custom_settings = {
        'ITEM_PIPELINES': {'pipelines.IndictmentListPipeline': 300}
    }
    # Timestamp of the last cookie refresh; cookies are re-fetched via
    # selenium when more than 5 minutes have passed.
    startTime = datetime.datetime.now()

    def start_requests(self):
        """Start crawling with page 1 of the indictment search results."""
        # Search URLs per document type.  Only the first (indictments, 起诉书)
        # is crawled at the moment; the rest are kept for reference.
        # BUG FIX: the original list was missing a comma after the protest
        # document (抗诉书) URL, so two adjacent string literals were silently
        # concatenated into one invalid URL.
        urls = [
            # indictments (起诉书)
            'http://www.ajxxgk.jcy.gov.cn/index.php?m=search&c=index&a=init&typeid=&q=%E8%B5%B7%E8%AF%89%E4%B9%A6&siteid=1&newstypeid=54&time=all&page=',
            # non-prosecution decisions (不起诉书)
            'http://www.ajxxgk.jcy.gov.cn/index.php?m=search&c=index&a=init&typeid=&siteid=1&q=%E4%B8%8D%E8%B5%B7%E8%AF%89&page=',
            # protest documents (抗诉书)
            'http://www.ajxxgk.jcy.gov.cn/index.php?m=search&c=index&a=init&typeid=&siteid=1&q=%E6%8A%97%E8%AF%89&page=',
            # criminal appeal review decisions (刑事申诉复查决定书)
            'http://www.ajxxgk.jcy.gov.cn/index.php?m=search&c=index&a=init&typeid=1&siteid=1&q=%E5%88%91%E4%BA%8B%E7%94%B3%E8%AF%89%E5%A4%8D%E6%9F%A5%E5%86%B3%E5%AE%9A%E4%B9%A6&btnsearch=&page=']

        url = urls[0]
        nowTime = datetime.datetime.now()
        cookies = get_cookie_by_selenium()
        if nowTime > self.startTime + datetime.timedelta(minutes=5):
            self.startTime = nowTime
            cookies = get_cookie_by_selenium()
        self.headers["Cookie"] = cookies
        yield Request(url + "1", headers=self.headers,
                      meta={'url': url, 'pageNo': 1, "download_timeout": 120})

    def parse(self, response):
        """Yield one item per result row, then request the next page.

        Pagination stops once the page no longer contains any date inside the
        crawl window.
        """
        soup = BeautifulSoup(response.body, 'html5lib')
        if is_startday_to_endday(response.xpath('/html').extract()[0]):
            for wrapper in soup.find_all('li', {'class': 'wrap'}):
                # BUG FIX: the original created ONE IndictmentListItem before
                # the loop and mutated/yielded the same object for every row,
                # so all yielded items shared one mutable instance.  Create a
                # fresh item per row instead.
                item = IndictmentListItem()
                title_a_tag = wrapper.find('div', {'class': 'title'}).find_all('a')[0]
                item['title'] = title_a_tag.get_text()
                item['caseHref'] = 'http://www.ajxxgk.jcy.gov.cn' + title_a_tag['href']
                item['hasDoc'] = False
                item['uploadDate'] = wrapper.find('div', {'class': 'adds'}).get_text()
                yield item

            # Refresh cookies at most once every 5 minutes, then fetch the
            # next result page of the same search URL.
            pageNo = response.meta["pageNo"] + 1
            request_url = response.meta["url"]
            nowTime = datetime.datetime.now()
            if nowTime > self.startTime + datetime.timedelta(minutes=5):
                self.startTime = nowTime
                self.headers['Cookie'] = get_cookie_by_selenium()
            yield Request(request_url + str(pageNo), headers=self.headers, dont_filter=False,
                          meta={'url': request_url, 'pageNo': pageNo, "download_timeout": 120},
                          callback=self.parse)

