# coding=utf-8

import random
from urllib import urlencode

import scrapy
import time
from bs4 import BeautifulSoup
from scrapy.spiders import CrawlSpider

from myTest.item.AddPaperKeyWordItem import AddPaperKeyWordItem
from myTest.utils.getdata import GetData
import logging


class KeyWordSpider(CrawlSpider):
    """Scrape paper metadata (abstract, keywords, class number, fund,
    all authors) from CNKI (kns.cnki.net) for every paper in one locally
    stored data block.

    Request chain per paper:
      search handler (establishes the session) -> brief result listing
      -> first hit's detail page -> AddPaperKeyWordItem.
    """

    name = "KeyWordInfo"
    base_url = "http://kns.cnki.net/kns/"
    # 1-based index of the local paper block to crawl; randomised when unset.
    data_block = None
    max_block_count = 62

    def __init__(self, data_block=None, *a, **kw):
        super(KeyWordSpider, self).__init__(*a, **kw)
        if data_block is not None:
            self.data_block = int(data_block)

    def start_requests(self):
        """Yield one CNKI search request per paper in the chosen block.

        Each paper gets its own cookiejar index so the session created by
        the search handler is reused only by that paper's follow-ups.
        """
        search_base_url = self.base_url + "request/SearchHandler.ashx"
        if self.data_block is None or self.data_block > self.max_block_count:
            self.data_block = random.randint(1, self.max_block_count)
            logging.warning(
                "data_block is None or over max_block_count\n"
                "will run with block %s by random", self.data_block)
        urlParams = {
            "action": "",
            "NaviCode": "*",
            "ua": 1.11,
            "PageName": "ASP.brief_default_result_aspx",
            "DbPrefix": "SCDB",
            "DbCatalog": u"中国学术文献网络出版总库".encode("utf-8"),
            "ConfigFile": "SCDBINDEX.xml",
            "db_opt": "CJFQ,CJRF,CDFD,CMFD,CPFD,IPFD,CCND,CCJD",
            "txt_1_sel": "SU$%=|",  # search field: subject (SU)
            "txt_1_special1": "%",
            "txt_1_value1": None,   # filled per paper in the loop below
            "his": 0,
            "parentdb": "SCDB",
            "__": "Thu Jan 11 2018 13:10:09 GMT+0800"
        }
        ssDataList = GetData.get_more_paper_data_block(self.data_block)
        for i, ssData in enumerate(ssDataList):
            urlParams["txt_1_value1"] = ssData["paperName"].encode('utf-8')
            urlParams["__"] = ""
            url = search_base_url + "?" + urlencode(urlParams)
            yield scrapy.Request(url, callback=self.get_basic_patent_info,
                                 meta={'ssData': ssData, 'cookiejar': i})

    def get_basic_patent_info(self, response):
        """Request the brief result listing for the paper searched above."""
        ssData = response.meta['ssData']
        brief_url = self.base_url + "brief/brief.aspx"
        urlParams = {
            "pagename": "ASP.brief_default_result_aspx",
            "dbPrefix": "SCDB",
            "dbCatalog": u"中国学术文献网络出版总库".encode("utf-8"),
            "ConfigFile": "SCDBINDEX.xml",
            "research": "off",
            "DisplayMode": "listmode",
            "t": int(time.time() * 1000),  # cache-buster timestamp (ms)
            "keyValue": None,
            "S": 1,
            "txt_1_value1": ssData["paperName"].encode('utf-8')
        }
        url = brief_url + "?" + urlencode(urlParams)
        yield scrapy.Request(url=url, callback=self.get_paperLink_info,
                             meta={'ssData': ssData,
                                   'cookiejar': response.meta['cookiejar']})

    def get_paperLink_info(self, response):
        """Follow the first search hit's detail link, if any."""
        ssData = response.meta['ssData']
        soup = BeautifulSoup(response.text, 'lxml')
        top = soup.find(bgcolor="#ffffff")
        try:
            # AttributeError/TypeError when the listing has no result link.
            paperLink = ("http://kns.cnki.net" +
                         top.find('a', class_="fz14").get('href'))
        except (AttributeError, TypeError):
            logging.warning("%s  by  %s is not found",
                            ssData['paperName'], ssData['paperAuthor'])
            return
        ssData['paperLink'] = paperLink
        yield scrapy.Request(url=paperLink, callback=self.get_reference_info,
                             meta={'ssData': ssData,
                                   'cookiejar': response.meta['cookiejar']})

    @staticmethod
    def _tagged_text(container, elem_id, prefix_len, collapse_ws,
                     paper_name, label):
        """Text of one labelled field inside *container*.

        Locates the element with id *elem_id*, takes its parent's text,
        drops the first *prefix_len* label characters and optionally
        strips newlines/spaces. Returns '' (and logs) when *container*
        is None or the element is absent.
        """
        try:
            text = container.find(id=elem_id).parent.get_text()
        except AttributeError:  # container is None or tag missing
            logging.warning("can't find %s's %s", paper_name, label)
            return ''
        text = text[prefix_len:]
        if collapse_ws:
            text = text.replace('\r\n', '').replace(' ', '')
        return text

    def get_reference_info(self, response):
        """Parse the paper detail page and build an AddPaperKeyWordItem.

        Missing fields degrade to '' so one absent tag never drops the
        whole item.
        """
        ssData = response.meta['ssData']
        paperName = ssData['paperName']
        soup = BeautifulSoup(response.text, 'lxml')
        result = soup.find(class_='wxBaseinfo')
        if result is None:
            logging.warning("can't find %s's wxBaseinfo", paperName)
        try:
            zhaiyao = result.find(id='ChDivSummary').get_text()
        except AttributeError:
            zhaiyao = ''
            logging.warning("can't find %s's zhaiyao", paperName)
        jijin = self._tagged_text(result, 'catalog_FUND', 3, True,
                                  paperName, 'jijin')
        fenLeiHao = self._tagged_text(result, 'catalog_ZTCLS', 4, False,
                                      paperName, 'fenLeiHao')
        guanjianci = self._tagged_text(result, 'catalog_KEYWORD', 4, True,
                                       paperName, 'guanjianci')
        try:
            paper = ''.join(zuozhe.get_text() + ';'
                            for zuozhe in soup.find(class_="author").contents)
        except AttributeError:
            paper = ''
            logging.warning("can't find %s's suoyouzuozhe", paperName)

        # gb18030 encoding matches what the downstream pipeline stores;
        # NOTE(review): confirm before changing.
        item = AddPaperKeyWordItem()
        item['abstract'] = zhaiyao.encode('gb18030')
        item['keyWord'] = guanjianci.encode('gb18030')
        item['classNum'] = fenLeiHao.encode('gb18030')
        item['allAuthor'] = paper.encode('gb18030')
        item['fund'] = jijin.encode('gb18030')
        item['paperName'] = ssData['paperName'].encode('gb18030')
        item['paperID'] = ssData['paperID'].encode('gb18030')
        item['entityId'] = ssData['entityId'].encode('gb18030')
        item['paperTime'] = ssData['paperTime'].encode('gb18030')
        item['paperLink'] = ssData['paperLink'].encode('gb18030')
        item['paperAuthor'] = ssData['paperAuthor'].encode('gb18030')
        item['baiduLink'] = ssData['baiduLink'].encode('gb18030')
        return item