# -*- coding=utf-8 -*-
import json
import re

import pymongo
import scrapy
from scrapy.exceptions import CloseSpider
from scrapy.spiders import Spider

from ..items import ZhiWangItem, _DBConf
from .commonFn import *
from redis import Redis


class CourseSpider(Spider):
    """Spider for CNKI's mobile site (wap.cnki.net).

    Searches for ``key_word``, walks the paginated result list via POSTs to
    ``list_url``, and scrapes each article's detail page into a ZhiWangItem.
    Already-seen URLs (tracked in MongoDB) are skipped.
    """
    name = "ZhiWang"
    allowed_domains = ["cnki.net"]
    start_urls = [
        'http://wap.cnki.net/touch/web'
    ]
    i = 0
    # Search keyword ("distance education").
    key_word = '远程教育'
    list_url = 'http://wap.cnki.net/touch/web/Article/Search/'
    # Browser-like headers; 'Referer' is refreshed as the request chain advances.
    headers = {
        'accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        'accept-encoding': "gzip, deflate",
        'accept-language': "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        'cache-control': "no-cache",
        'host': "wap.cnki.net",
        'pragma': "no-cache",
        'referer': "http://wap.cnki.net/touch/web",
        'upgrade-insecure-requests': "1",
        'user-agent': "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Mobile Safari/537.36",
    }

    # Base POST form for the search endpoint.  Requests take a per-page copy
    # (see parse_list), so this shared class attribute is never mutated.
    myFormData = {
        "pageindex": '1',
        "fieldtype": '101',
        "sorttype": '',
        "keyword": key_word,
        "articletype": '14',
        "pagesize": '20'
    }

    def __init__(self, storeConf=json.dumps(_DBConf), limit_count=0, trash_data=False, *a, **kw):
        """Set up the spider and its storage backends.

        :param storeConf: JSON-encoded database configuration (defaults to
            a dump of ``_DBConf``; evaluated once at definition time, which
            is safe because the result is an immutable string).
        :param limit_count: crawl limit, forwarded to ``initSpider``.
        :param trash_data: flag forwarded to ``initSpider``.
        """
        super().__init__(*a, **kw)
        # initSpider comes from the commonFn wildcard import.
        initSpider(self, trash_data=trash_data, limit_count=limit_count, storeConf=storeConf)
        self.limit_count = limit_count
        self.r = Redis(db=1)
        self.db = pymongo.MongoClient("mongodb://175.102.18.112:27018").kd_data.journal_zhiwang_new

    def start_requests(self):
        """Issue the bootstrap GET so the site sets its session cookies
        before we start POSTing search forms.

        :return: generator yielding the initial request.
        """
        data = {
            "kw": self.key_word,
            "field": 5
        }
        from urllib.parse import urlencode
        url = self.list_url + '?' + urlencode(data)
        yield scrapy.Request(url=url,
                             headers=self.headers,
                             meta={'cookiejar': 1},
                             callback=self.parse)

    def parse(self, response):
        """Submit the first search POST (page 1) to discover the total
        number of results.

        :param response: response to the bootstrap GET.
        :return: generator yielding the page-1 search request.
        """
        self.headers['Referer'] = response.request.url
        yield scrapy.FormRequest(url=self.list_url,
                                 headers=self.headers,
                                 method='POST',
                                 meta={'cookiejar': 1},
                                 formdata=self.myFormData,
                                 callback=self.parse_list,
                                 dont_filter=True)

    def parse_list(self, response):
        """Read the total hit count and schedule one search POST per page.

        :param response: first result page (POST of page 1).
        :raises CloseSpider: when the total-count element is missing.
        """
        total_text = response.xpath('//*[@id="totalcount"]/text()').extract_first()
        if total_text is None:
            # Without the count we cannot paginate; stop cleanly instead of
            # crashing on int(None).
            raise CloseSpider('totalcount not found on search result page')
        paper_size = int(total_text)
        page_size = int(self.myFormData['pagesize'])
        # BUG FIX: ceiling division + inclusive range.  The original floor-
        # divided and then used range(1, n), silently dropping the last page(s).
        paper_num = -(-paper_size // page_size)
        for page in range(1, paper_num + 1):
            # BUG FIX: the original wrote `str(page),` — a 1-tuple — into the
            # shared class-level dict; build a per-request copy instead.
            form_data = dict(self.myFormData, pageindex=str(page))
            yield scrapy.FormRequest(url=self.list_url,
                                     headers=self.headers,
                                     method='POST',
                                     meta={'cookiejar': page + 1, 'page': page},  # fresh session per page
                                     formdata=form_data,
                                     callback=self.parse_list_link,
                                     dont_filter=True)

    def parse_list_link(self, response):
        """Parse one result-list page and request every article detail page.

        Skips URLs already recorded in MongoDB and appends the page number to
        a local file as crude resume/progress tracking.

        :param response: list-page response (meta carries cookiejar/page).
        :return: generator yielding one detail request per new article.
        """
        self.headers['Referer'] = response.request.url
        items = response.xpath('//a[@class="c-company-top-link"]')
        # Record which list pages have been processed.
        with open('../record_page_new.txt', 'a') as f:
            f.write(str(response.meta['page']) + '\n')
        for item in items:
            url = item.xpath('@href').extract_first()
            if self.has_crawl_url(url):
                self.log("发现重复")
                continue
            # Source/year text shown on the list entry, cleaned of spaces and
            # CRLFs.  `or ''` guards a missing node (the original raised
            # AttributeError on None).
            pub_text = item.xpath('div[@class="c-company__body-name"]/span/text()').extract_first() or ''
            pub_text = pub_text.replace(' ', '').replace('\r\n', '')
            yield scrapy.Request(url="http:" + url,
                                 meta={'cookiejar': response.meta['cookiejar'],
                                       'public_year': pub_text},
                                 headers=self.headers,
                                 callback=self.parse_item)

    def parse_item(self, response):
        """Parse an article detail page into a ZhiWangItem.

        :param response: detail-page response; ``meta['public_year']`` carries
            the source/year text captured from the list page.
        :return: generator yielding one populated ZhiWangItem.
        """
        base_info = response.xpath('/html/body/div[4]/div[1]')
        keywords = base_info.xpath('//div[contains(text(),"关键词")]/following-sibling::*/a/text()').extract()
        item = ZhiWangItem()
        # `or ''` guards missing nodes so .replace() cannot raise on None.
        item['html'] = (base_info.xpath('//div[@class="c-card__aritcle"]/text()').extract_first() or '').replace(" ", "")
        item['url'] = response.url
        item['title'] = (base_info.xpath('//div[@class="c-card__title2"]/text()').extract_first() or '').replace(" ", "")
        item['author'] = base_info.xpath('//div[@class="c-card__author"]/a[3]/text()').extract_first()
        item['tags'] = keywords
        download_info = response.xpath('/html/body/div[4]/div[2]/div[1]')
        item['link_num'] = download_info.xpath("a[1]/text()").extract_first()
        item['download_num'] = download_info.xpath("a[2]/text()").extract_first()
        # Classify by URL.  BUG FIX: str.find() returns -1 (truthy) when the
        # substring is absent and 0 (falsy) at position zero, so the original
        # truth-tests were effectively inverted; use `in` instead.
        if "Dissertation" in item['url']:
            item['source_type'] = '学位论文'
        elif "Conference" in item['url']:
            item['source_type'] = '会议论文'
        elif "Newspaper" in item['url']:
            item['source_type'] = '重要报纸'
        elif "Journal" in item['url']:
            item['source_type'] = '期刊'
        public_info = response.meta['public_year'].split('\xa0')

        if len(public_info) == 3:
            # Three NBSP-separated fields: college, source type, year.
            item['source_type'] = public_info[1]
            item['college'] = public_info[0]
            item['public_year'] = public_info[2]
        else:
            if response.meta['public_year'].find('期') != -1:
                # Journal issue marker found: split source and year ourselves.
                public_info = response.meta['public_year'].split(" ")[0].split('\xa0')
                item['source_type'] = '期刊'
                item['college'] = public_info[0]
                item['public_year'] = public_info[1]
            else:
                # Fall back to the "book" section for the name and date.
                author_info = response.xpath('/html/body/a[@class="c-book"]')
                if author_info:
                    is_book = author_info.xpath('//div[@class="c-book__img"]')
                    if is_book:
                        # `or ''` so .find/.replace below never see None.
                        book_name = author_info.xpath('//div[@class="c-book__title"]/text()').extract_first() or ''
                        book_time = author_info.xpath('//div[@class="c-book__time"]/text()').extract_first() or ''
                        if book_name.find("论文集") != -1:
                            item['source_type'] = '会议论文'
                        elif book_name.find("报") != -1:
                            item['source_type'] = '重要报纸'
                        else:
                            item['source_type'] = '期刊'
                        item['college'] = book_name.replace(" ", "").replace('\r\n', '')
                        item['public_year'] = book_time.replace(" ", "").replace('\r\n', '')
                    else:
                        self.log("未知道book部分")
                else:
                    self.log("还有其他不知名类型存在。。")
        # NOTE(review): these two assignments unconditionally override every
        # source_type computed above, so all items ship as 期刊/CSSCI — looks
        # like debug leftovers; kept to preserve current output, but confirm.
        item['source_type'] = '期刊'
        item['source_type_level'] = 'CSSCI'
        yield item

    def has_crawl_url(self, url):
        """Return a truthy document count if *url* is already in MongoDB.

        :param url: article URL (scheme-relative href from the list page).
        """
        if self.db is None:
            # pymongo Collections forbid truth-testing ("not self.db" raises
            # in modern pymongo); compare against None explicitly.
            self.db = pymongo.MongoClient("mongodb://175.102.18.112:27018").kd_data.journal_zhiwang_new
        # Escape regex metacharacters ('.', '?', ...) that appear in URLs,
        # and use count_documents — Cursor.count() was removed from pymongo 4.
        return self.db.count_documents({"url": {"$regex": re.escape(url)}})
