# coding=utf-8

import random
from urllib import urlencode

import scrapy
import time
from bs4 import BeautifulSoup
from scrapy.spiders import CrawlSpider

from myTest.item.referenceItem import ReferenceItem
from myTest.utils.getdata import GetData
import logging




class ReferenceSpider(CrawlSpider):
    """Crawl CNKI (kns.cnki.net) and emit one ReferenceItem per cited reference.

    Pipeline per paper in the chosen data block:
      start_requests  -> request/SearchHandler.ashx  (registers the search in the session)
      first_time_find -> brief/brief.aspx            (result list for "<title> <author>")
      get_paperLink_info                             (first hit; on a miss, retry once
                                                      with title only via second_time_*)
      get_reference_link -> kcms/detail/frame/list.aspx (the paper's reference list)
      get_addrefer_info2                             (one ReferenceItem per <li>, with
                                                      list pagination)
      get_finally                                    (final hop via baiduLink, yields item)
    """

    name = "referenceInfo"
    base_url = "http://kns.cnki.net/kns/"
    # Index of the paper-data block to crawl; randomized when unset or out of range.
    data_block = None
    max_block_count = 62

    def __init__(self, data_block=None, *a, **kw):
        super(ReferenceSpider, self).__init__(*a, **kw)
        if data_block is not None:
            self.data_block = int(data_block)

    def _ensure_block(self):
        """Pick a random block when none was given or the given one is out of range."""
        if self.data_block is None or self.data_block > self.max_block_count:
            self.data_block = random.randint(1, self.max_block_count)
            logging.warning("data_block is None or over max_block_count\n"
                            "will run with block {} by random".format(self.data_block))

    @staticmethod
    def _search_params(value):
        """Query params for request/SearchHandler.ashx with search text *value*."""
        return {
            "action": "",
            "NaviCode": "*",
            "ua": 1.11,
            "PageName": "ASP.brief_default_result_aspx",
            "DbPrefix": "SCDB",
            "DbCatalog": u"中国学术文献网络出版总库".encode("utf-8"),
            "ConfigFile": "SCDBINDEX.xml",
            "db_opt": "CJFQ,CJRF,CDFD,CMFD,CPFD,IPFD,CCND,CCJD",
            "txt_1_sel": "SU$%=|",
            "txt_1_special1": "%",
            "txt_1_value1": value,
            "his": 0,
            "parentdb": "SCDB",
            "__": "Thu Jan 11 2018 13:10:09 GMT+0800",
        }

    @staticmethod
    def _brief_params(value):
        """Query params for brief/brief.aspx with search text *value*."""
        return {
            "pagename": "ASP.brief_default_result_aspx",
            "dbPrefix": "SCDB",
            "dbCatalog": u"中国学术文献网络出版总库".encode("utf-8"),
            "ConfigFile": "SCDBINDEX.xml",
            "research": "off",
            "DisplayMode": "listmode",
            "t": int(time.time() * 1000),
            "keyValue": None,
            "S": 1,
            "txt_1_value1": value,
        }

    @staticmethod
    def _title_author(ssData):
        """UTF-8 '<title> <author>' search text for the first-pass search."""
        return (ssData["paperName"].encode('utf-8') + ' '
                + ssData['paperAuthor'].encode('utf-8'))

    def start_requests(self):
        """Register one title+author search per paper in the data block."""
        search_base_url = self.base_url + "request/SearchHandler.ashx"
        self._ensure_block()
        ssDataList = GetData.get_more_paper_data_block(self.data_block)
        for i, ssData in enumerate(ssDataList):
            urlParams = self._search_params(self._title_author(ssData))
            urlParams["__"] = ""  # first-pass search sends an empty timestamp field
            url = search_base_url + "?" + urlencode(urlParams)
            # One cookiejar per paper so CNKI sessions do not interfere.
            yield scrapy.Request(url, callback=self.first_time_find,
                                 meta={'ssData': ssData, 'cookiejar': i})

    def first_time_find(self, response):
        """After the search is registered, fetch the brief result list."""
        ssData = response.meta['ssData']
        url = (self.base_url + "brief/brief.aspx" + "?"
               + urlencode(self._brief_params(self._title_author(ssData))))
        yield scrapy.Request(url=url, callback=self.get_paperLink_info,
                             meta={'flag': 1, 'url': url, 'ssData': ssData,
                                   'cookiejar': response.meta['cookiejar']})

    def get_paperLink_info(self, response):
        """Pick the first search hit and follow it to the paper's detail page.

        ``meta['flag']``: 1 = title+author search, 2 = title-only retry.
        On a miss with flag 1, rerun the search with the title only; with
        flag 2, give up and log the failure.
        """
        ssData = response.meta['ssData']
        soup = BeautifulSoup(response.text, 'lxml')
        top = soup.find(bgcolor="#ffffff")
        anchor = top.find('a', class_="fz14") if top is not None else None
        href = anchor.get('href') if anchor is not None else None
        if not href:
            if response.meta['flag'] == 1:
                yield scrapy.Request(url=response.url, callback=self.second_time_start,
                                     meta={'flag': 2, 'ssData': ssData,
                                           'cookiejar': response.meta['cookiejar']})
            else:
                # BUG FIX: original used a misspelled .ecode() and a py2 print here.
                logging.error(u"Second time: don't find %s by %s without Author",
                              ssData['paperName'], ssData['paperAuthor'])
            # BUG FIX: original fell through and raised NameError on paperLink.
            return
        paperLink = "http://kns.cnki.net" + href
        ssData['paperLink'] = paperLink
        yield scrapy.Request(url=paperLink, callback=self.get_reference_link,
                             meta={'ssData': ssData,
                                   'cookiejar': response.meta['cookiejar']})

    def second_time_start(self, response):
        """Re-register the search using the paper title only (no author)."""
        ssData = response.meta['ssData']
        search_base_url = "http://kns.cnki.net/kns/request/SearchHandler.ashx"
        self._ensure_block()
        urlParams = self._search_params(ssData["paperName"].encode('utf-8'))
        url = search_base_url + "?" + urlencode(urlParams)
        yield scrapy.Request(url=url, callback=self.second_time_find,
                             meta={'flag': response.meta['flag'], 'ssData': ssData,
                                   'cookiejar': response.meta['cookiejar']})

    def second_time_find(self, response):
        """Fetch the brief result list for the title-only retry."""
        ssData = response.meta['ssData']
        url = (self.base_url + "brief/brief.aspx" + "?"
               + urlencode(self._brief_params(ssData["paperName"].encode('utf-8'))))
        yield scrapy.Request(url=url, callback=self.get_paperLink_info,
                             meta={'flag': response.meta['flag'], 'url': url,
                                   'ssData': ssData,
                                   'cookiejar': response.meta['cookiejar']})

    def get_reference_link(self, response):
        """Build the frame/list.aspx URL that serves the paper's reference list."""
        ssData = response.meta['ssData']
        link = ssData['paperLink']
        # NOTE(review): positional parsing assumes CNKI's fixed query-string order
        # (params 3/4/5 are filename/dbname/dbcode) — confirm before reusing elsewhere.
        canshus = link.split("?")[1].split("&")
        urlParams = {
            "dbcode": canshus[5].split("=")[1],
            "filename": canshus[3].split("=")[1],
            "dbname": canshus[4].split("=")[1],
            "RefType": 1,
            "vl": "",
        }
        url = "http://kns.cnki.net/kcms/detail/frame/list.aspx?" + urlencode(urlParams)
        yield scrapy.Request(url=url, callback=self.get_addrefer_info2,
                             meta={'ssData': ssData,
                                   'cookiejar': response.meta['cookiejar']})

    def get_addrefer_info2(self, response):
        """Emit one ReferenceItem per reference <li>, following list pagination."""
        ssData = response.meta['ssData']
        soup = BeautifulSoup(response.text, 'lxml')
        url_base = "http://kns.cnki.net/kcms/detail/frame/list.aspx?"
        for essayBox in soup.find_all(class_='essayBox'):
            # First line of the box header names the source database.
            dbTitle = essayBox.find(class_="dbTitle").get_text().split("\r\n")[0]
            for refer in essayBox.find_all('li'):
                item = ReferenceItem()
                information = refer.get_text().replace("\r\n", '&&').replace(' ', '')
                anchor = refer.find('a')
                # Some entries (e.g. plain-text citations) carry no link at all.
                href = anchor.get('href') if anchor is not None else None
                item['paperID'] = ssData['paperID']
                item['entityId'] = ssData['entityId']
                item['paperName'] = ssData['paperName']
                item['paperAuthor'] = ssData['paperAuthor']
                item['baiduLink'] = ssData['baiduLink']
                item['paperLink'] = ssData['paperLink']
                item['referenceName'] = information
                item['referenceLink'] = href if href is not None else ''
                item['referenceType'] = dbTitle
                yield scrapy.Request(url=item['baiduLink'], callback=self.get_finally,
                                     meta={'item': item,
                                           'cookiejar': response.meta['cookiejar']})
            # BUG FIX: original called find_all on the ResultSet (AttributeError)
            # and pointed the callback at a nonexistent method (get_addrefer_info).
            nextPage = essayBox.find_all("a", string=u"下一页")
            if nextPage:
                yield scrapy.Request(url=url_base + nextPage[0].attrs['href'],
                                     callback=self.get_addrefer_info2,
                                     meta={'ssData': ssData,
                                           'cookiejar': response.meta['cookiejar']})

    def get_finally(self, response):
        """Final hop via the paper's Baidu link; hand the finished item back."""
        return response.meta['item']