#!/usr/bin/env python
# -*- coding: utf-8 -*-
# For sister Deng Hong
# Description: cnki crawler
# Date: 2018-04-15

import time
import re
import sys

from framework.crawler import Crawler
from framework.crawler import ReferenceItem
import framework.brwutil as brwutil

cnki_kns_url = r"http://kns.cnki.net/kns/brief/result.aspx?dbprefix=SCDB"
cnki_search_url = r"http://kns.cnki.net/kns/request/SearchHandler.ashx?action=&NaviCode=*&ua=1.21&PageName=ASP.brief_result_aspx&DbPrefix=SCDB&DbCatalog=%e4%b8%ad%e5%9b%bd%e5%ad%a6%e6%9c%af%e6%96%87%e7%8c%ae%e7%bd%91%e7%bb%9c%e5%87%ba%e7%89%88%e6%80%bb%e5%ba%93&ConfigFile=SCDB.xml&db_opt=CJFQ%2CCJRF%2CCDFD%2CCMFD%2CCPFD%2CIPFD%2CCCND%2CCCJD&au_1_sel=AU&au_1_sel2=AF&au_1_value2=%E5%8C%97%E4%BA%AC%E5%B7%A5%E4%B8%9A%E5%A4%A7%E5%AD%A6&au_1_special1=%3D&au_1_special2=%25&his=0&__=Sun%20Apr%2015%202018%2009%3A39%3A22%20GMT%2B0800%20(%E4%B8%AD%E5%9B%BD%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)"

# Request URL with the search conditions already applied: configure the
# conditions in a browser, then copy the resulting URL here.
cnki_condition_url = r"http://kns.cnki.net/kns/brief/brief.aspx?pagename=ASP.brief_result_aspx&dbPrefix=SCDB&dbCatalog=%E4%B8%AD%E5%9B%BD%E5%AD%A6%E6%9C%AF%E6%96%87%E7%8C%AE%E7%BD%91%E7%BB%9C%E5%87%BA%E7%89%88%E6%80%BB%E5%BA%93&ConfigFile=SCDB.xml&research=off&t=1523756035786&keyValue=&S=1"

db_path = "./data/cnki.db"


def enter_cnki(browser):
    """Walk the browser through CNKI's entry, search and condition URLs.

    Returns the href of the pager link (".TitleLeftCell a"), which the
    crawler later rewrites to jump to a specific result page.
    """
    # The three GETs must happen in this order: the search and condition
    # requests rely on session state established by the previous step.
    for step_url in (cnki_kns_url, cnki_search_url, cnki_condition_url):
        browser.get(step_url)
    pager_link = browser.find_element_by_css_selector(".TitleLeftCell a")
    base_url = pager_link.get_attribute("href")
    print("---------------\n%s\n---------------" % (base_url))
    return base_url

# CNKI crawler class


class CNKICrawler(Crawler):
    """Crawler for CNKI (China National Knowledge Infrastructure).

    Drives the Selenium browser supplied by the framework ``Crawler``
    base class through CNKI's paged result listing and scrapes each
    detail page into a ``ReferenceItem``.
    """

    def __init__(self):
        # Use the module-level db_path constant instead of repeating the
        # literal, so the storage location is configured in one place.
        Crawler.__init__(self, "CNKI", db_path)

    # override
    def get_page_count(self):
        """Open the result listing and report how many pages to crawl.

        Side effect: caches the pager base URL for get_items_urls().
        NOTE(review): the count is hard-coded to 100 — presumably a
        deliberate cap; confirm against the site's actual result count.
        """
        self.page_base_url = enter_cnki(self.browser)
        return 100

    # override
    def get_items_urls(self, page_index):
        """Return detail-page URLs found on result page ``page_index``.

        Page 0 is already loaded by get_page_count(); for later pages
        the pager URL's ``curpage`` query parameter is rewritten.
        """
        if page_index > 0:
            self.browser.get(self.page_base_url.replace(
                "curpage=2", "curpage=%d" % (page_index + 1)))

        urls = []
        for fname in self.browser.find_elements_by_name("FileNameS"):
            v = fname.get_attribute("value")
            # Expected value format: "<dbname>!<filename>!<...>!<...>".
            rs = re.search("(.*?)!(.*?)!(.*?)!(.*)", v)
            if rs is None:
                # Skip malformed entries instead of crashing the page
                # (the original dereferenced rs unconditionally).
                continue
            urls.append(
                "http://kns.cnki.net/KCMS/detail/detail.aspx?dbcode=CJFR&dbname=%s&filename=%s" % (
                    rs.group(1), rs.group(2)))
        return urls

    # override
    def get_item(self, url):
        """Scrape one detail page into a ReferenceItem.

        Optional fields (abstract, keywords, DOI, category, source
        information) are best-effort: a missing element leaves the
        field unset rather than aborting the item.
        """
        browser = self.browser
        refItem = ReferenceItem()
        browser.get(url)

        title_elem = browser.find_element_by_css_selector("h2.title")
        # BUGFIX: the original wrote
        #   title_elem.text == None if "" else title_elem.text
        # an inverted conditional that always yielded title_elem.text.
        # The intent was to substitute "" when the text is missing.
        refItem.title = "" if title_elem.text is None else title_elem.text
        refItem.url = url

        author_elems = browser.find_elements_by_css_selector(
            "div.author span a")
        refItem.authors = ";".join(ele.text for ele in author_elems)

        try:
            abs_elem = browser.find_element_by_id("ChDivSummary")
            refItem.abstract = abs_elem.text
        except Exception:
            pass  # abstract is optional

        orgn_elems = browser.find_elements_by_css_selector("div.orgn span a")
        refItem.institutions = ";".join(
            ele.text for ele in orgn_elems if ele.text != "")

        try:
            # Keyword links live in the parent of #catalog_KEYWORD.
            kys_elems = browser.find_element_by_id("catalog_KEYWORD").find_element_by_xpath(
                "..").find_elements_by_css_selector("a")
            refItem.keywords = ";".join(
                ele.text for ele in kys_elems if ele.text != "")
        except Exception:
            pass  # keywords are optional

        try:
            # DOI is the text of the parent of #catalog_ZCDOI.
            refItem.doi = browser.find_element_by_id(
                "catalog_ZCDOI").find_element_by_xpath("..").text
        except Exception:
            pass  # DOI is optional

        try:
            # Subject category: parent of #catalog_ZTCLS.
            refItem.category = browser.find_element_by_id(
                "catalog_ZTCLS").find_element_by_xpath("..").text
        except Exception:
            pass  # category is optional

        try:
            # Source information block. find_element_by_class_name raises
            # when absent (it never returns None), so the whole section is
            # guarded by try/except like the other optional fields.
            info_ele = browser.find_element_by_class_name("sourinfo")
            links = info_ele.find_elements_by_css_selector("p a")
            refItem.journal = links[0].text
            yearandissue = links[2].text
            # e.g. "2018年04期" -> year "2018", issue "04"
            ms = re.search(u'(\\d+)年(\\d+)期', yearandissue)
            if ms:
                refItem.year = ms.group(1)
                refItem.issue = ms.group(2)

            refItem.issn = info_ele.find_elements_by_css_selector("p")[3].text
        except Exception:
            pass  # source information is optional

        return refItem


if __name__ == "__main__":
    # BUGFIX: this append originally ran AFTER crawler.start() returned,
    # so it could never affect the crawl.  Moved before the crawler is
    # created.  NOTE(review): sys.path is Python's module search path,
    # not the OS executable search path — it does not help Selenium
    # locate chromedriver.exe; consider prepending "." to
    # os.environ["PATH"] instead.
    sys.path.append(".")
    cnki_crawler = CNKICrawler()
    cnki_crawler.start(True)
    print("End!")
