# coding=utf-8

import scrapy
from bs4 import BeautifulSoup
from scrapy.spiders import CrawlSpider

from myTest.utils.getdata import GetData
from myTest.item.PaperItem import PaperItem


class PaperInfoSpider(CrawlSpider):
    """Crawl paper metadata for scholars from Baidu Xueshu (xueshu.baidu.com).

    For every scholar record loaded from a data block, the spider posts to
    the author-data endpoint once per paper type (journal / conference /
    monograph), discovers how many result pages exist, fans out one request
    per page, and extracts per-paper fields into ``PaperItem`` objects.
    """

    name = "paperInfo"
    base_url = "http://xueshu.baidu.com"
    data_block = None       # index of the data block to crawl, set in __init__
    max_block_count = 62    # total number of data blocks available

    def __init__(self, data_block=None, *a, **kw):
        """Initialize the spider.

        :param data_block: index of the data block to read scholar records
            from (usually passed on the command line via ``-a data_block=N``).
            May be ``None``; the original code called ``int(data_block)``
            unconditionally and raised ``TypeError`` when the argument was
            omitted.
        """
        self.data_block = int(data_block) if data_block is not None else None
        super(PaperInfoSpider, self).__init__(*a, **kw)

    def _author_request(self, entity_id, paper_type, page_num, callback, meta):
        """Build the POST request for one page of a scholar's paper listing.

        Shared by ``start_requests``, ``get_page_num`` and the fallback
        branch; previously this FormRequest was duplicated three times.
        """
        return scrapy.FormRequest(
            url='http://xueshu.baidu.com/usercenter/data/author',
            formdata={'cmd': 'academic_paper',
                      'entity_id': entity_id,
                      'bsToken': 'the+fisrt+two+args+should+be+string+type:0,1!',
                      'sc_sort': 'sc_time',
                      'paper_type': str(paper_type),
                      'curPageNum': str(page_num)},
            callback=callback,
            meta=meta)

    def start_requests(self):
        """Yield the first-page request for every scholar and paper type."""
        # Read scholar records from the configured data block.
        paper_data_list = GetData.get_data_from_orial(self.data_block)
        for paper_data in paper_data_list:
            item = PaperItem()
            item["scholarId"] = paper_data["scholarId"]
            item["personName"] = paper_data["personName"]
            item["entityId"] = paper_data["entityId"]
            entity_id = paper_data["entityId"].encode('utf-8')
            # 1 = journal, 3 = conference, 4 = monograph
            # (see paper_type_name in get_paper_info).
            for paper_type in (1, 3, 4):
                yield self._author_request(
                    entity_id, paper_type, 1, self.get_page_num,
                    meta={'paperInfo': item,
                          'entity_id': entity_id,
                          'paper_type': paper_type})

    def get_page_num(self, response):
        """Read the pager from the first result page and fan out one
        request per page; when no pager is rendered, fetch page 1 only."""
        paper_type = response.meta['paper_type']
        orial_item = response.meta['paperInfo'].copy()
        orial_item['articleType'] = paper_type
        entity_id = response.meta['entity_id']
        bs = BeautifulSoup(response.body, "lxml")
        page_links = bs.find_all(class_='res-page-number pagenumber')
        if page_links:
            # The last pager entry carries the highest page number.
            last_page = int(page_links[-1].text)
            for page in range(1, last_page + 1):
                yield self._author_request(
                    entity_id, paper_type, page, self.get_paper_info,
                    meta={'paperInfo': orial_item.copy()})
        else:
            yield self._author_request(
                entity_id, paper_type, 1, self.get_paper_info,
                meta={'paperInfo': orial_item})

    def get_paper_info(self, response):
        """Extract one ``PaperItem`` per result entry on a listing page.

        Returns the list of populated items, or ``None`` (implicitly) when
        the page contains no ``.result`` entries.
        """
        bs = BeautifulSoup(response.body, "lxml")
        result_list = bs.select(".result")
        # Indexed by paper_type: 1 -> journal, 3 -> conference, 4 -> monograph.
        paper_type_name = [None, u'期刊', None, u'会议', u'专著']
        orial_item = response.meta['paperInfo']
        item_list = []
        if result_list:
            for result in result_list:
                item = orial_item.copy()
                res_info = result.find(class_='res_info').contents
                if len(res_info) == 7:
                    item['paperFrom'] = res_info[-3].text  # publication venue
                    item['paperAuthor'] = res_info[2].text  # co-authors
                    item['paperTime'] = result.find(class_='res_year').text  # year of publication
                item['paperName'] = result.find(class_='res_t').a.text  # article title
                item['paperLink'] = 'http:' + result.h3.a['href']  # article link
                try:
                    item['quoteNum'] = result.find(class_="cite_cont").text
                except AttributeError:
                    # No citation counter rendered for this entry.
                    item['quoteNum'] = ""
                item['articleType'] = paper_type_name[int(item['articleType'])]
                item_list.append(item)
            return item_list
