# -*- coding=utf-8 -*-
import scrapy
import time

from bson import ObjectId
from scrapy.spiders import Spider
from ..items import ZhiWangItem, _DBConf
from scrapy.exceptions import CloseSpider
from bs4 import BeautifulSoup
from .commonFn import *
from redis import Redis
from pymongo import MongoClient


class CourseSpider(Spider):
    """Backfill spider: for CNKI journal records already stored in Mongo that
    lack ``public_year``, re-fetch the mobile detail page and extract the
    book title / publication time into the record via ``$set``.
    """

    name = "ZhiWangM"
    allowed_domains = ["cnki.net"]
    start_urls = [
        'http://wap.cnki.net/touch/web'
    ]
    # NOTE(review): class attribute `i` was unused (parse_list keeps its own
    # counter); kept for backward compatibility in case external code reads it.
    i = 0
    key_word = '远程教育'
    list_url = 'http://wap.cnki.net/touch/web/Article/Search/'
    # Browser-like headers; CNKI's mobile site rejects bare scrapy defaults.
    headers = {
        'accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        'accept-encoding': "gzip, deflate",
        'accept-language': "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        'cache-control': "no-cache",
        'host': "wap.cnki.net",
        'pragma': "no-cache",
        'referer': "http://wap.cnki.net/touch/web",
        'upgrade-insecure-requests': "1",
        'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36",
    }

    # Form payload for the search listing endpoint (currently unused by the
    # backfill flow but part of the spider's public surface).
    myFormData = {
        "pageindex": '1',
        "fieldtype": '101',
        "sorttype": '',
        "keyword": key_word,
        "articletype": '-1',
        "pagesize": '20'
    }

    def __init__(self, storeConf=json.dumps(_DBConf), limit_count=0, trash_data=False, *a, **kw):
        """Load store configuration and open Redis/Mongo handles.

        :param storeConf: JSON-encoded DB configuration (defaults to _DBConf)
        :param limit_count: crawl limit passed through to initSpider
        :param trash_data: flag passed through to initSpider
        """
        super().__init__(*a, **kw)
        # initSpider comes from commonFn and wires up shared storage state.
        initSpider(self, trash_data=trash_data, limit_count=limit_count, storeConf=storeConf)
        self.limit_count = limit_count
        # NOTE(review): hard-coded Redis DB and Mongo endpoint — consider
        # sourcing these from storeConf instead.
        self.r = Redis(db=1)
        self.db = MongoClient("mongodb://175.102.18.112:27018").kd_data.journal_zhiwang_new

    def start_requests(self):
        """Issue one priming request so the cookiejar is populated before
        the backlog of detail pages is crawled."""
        yield scrapy.Request(
            url="http://wap.cnki.net/touch/web/Journal/Article/SXDH201002023.html",
            headers=self.headers,
            meta={'cookiejar': 1},
            callback=self.parse_list)

    def parse_list(self, response):
        """Queue a detail-page request for every Mongo document that is still
        missing ``public_year``.

        :param response: the priming response (only used to chain callbacks)
        """
        pending = self.db.find({"public_year": {"$exists": False}})
        for page, doc in enumerate(pending, start=1):
            # Plain GET request; a FormRequest with method='GET' and no form
            # data is equivalent. Fresh cookiejar per request to avoid
            # session-based throttling.
            yield scrapy.Request(
                url=doc['url'],
                headers=self.headers,
                meta={'cookiejar': page + 1, 'page': page, 'wait_key': str(doc['_id'])},
                callback=self.parse,
                dont_filter=True)

    def parse(self, response):
        """Extract book name / publication time from a detail page and
        ``$set`` them onto the originating Mongo document.

        :param response: detail page; meta['wait_key'] holds the Mongo _id
        """
        item = ZhiWangItem()
        author_info = response.xpath('/html/body/a[@class="c-book c-card__new4  u-mt-2"]')
        if author_info:
            # './/' restricts the search to inside the anchor; a bare '//'
            # would search the whole document from the root.
            is_book = author_info.xpath('.//div[@class="c-book__img"]')
            if is_book:
                # Book entry found; extract_first() may return None when the
                # node has no text, so fall back to '' before cleaning.
                book_name = author_info.xpath('.//div[@class="c-book__title"]/text()').extract_first() or ''
                book_time = author_info.xpath('.//div[@class="c-book__time"]/text()').extract_first() or ''
                item['college'] = book_name.replace(" ", "").replace('\r\n', '')
                item['public_year'] = book_time.replace(" ", "").replace('\r\n', '')
            else:
                self.log("未知道book部分")
        else:
            self.log("还有其他不知名类型存在。。")
        wait_id = ObjectId(response.meta['wait_key'])
        if item:
            # update_one replaces the deprecated Collection.update (removed in
            # pymongo 4); BSON encoding needs a plain dict, not a scrapy Item.
            # Skipped entirely when nothing was scraped: an empty $set document
            # is rejected by the server.
            print(self.db.update_one({"_id": wait_id}, {"$set": dict(item)}))
