# coding=utf-8
import difflib
import json
import random
from urllib import urlencode

import scrapy
from bs4 import BeautifulSoup
from scrapy.spiders import CrawlSpider

from myTest.item.ScholarItem import ScholarItem
from myTest.utils.getdata import GetData
import logging


class ScholarInfoSpider(CrawlSpider):
    """Spider that resolves scholar records against Baidu Scholar
    (xueshu.baidu.com).

    Pipeline per scholar record:
      1. ``start_requests``        -- author search by name + affiliation.
      2. ``get_basic_scholar_info``-- pick the matching search hit, follow
                                      the personal profile link.
      3. ``get_more_scholar_info`` -- scrape h-index / g-index from the
                                      profile, then POST for co-affiliates.
      4. ``get_final_result``      -- attach co-author names, yield the item.
    """

    name = "scholarInfo"
    base_url = "http://xueshu.baidu.com"
    # Index of the pre-partitioned input data block to crawl; picked at
    # random in start_requests() when not supplied or out of range.
    data_block = None
    max_block_count = 63

    def __init__(self, data_block=None, *a, **kw):
        """:param data_block: optional block index, e.g. passed on the
        command line via ``scrapy crawl scholarInfo -a data_block=N``;
        accepted as a string and coerced to int."""
        super(ScholarInfoSpider, self).__init__(*a, **kw)
        if data_block is not None:
            self.data_block = int(data_block)

    def start_requests(self):
        """Yield one author-search request per scholar in the chosen block."""
        search_base_url = self.base_url + "/usercenter/data/authorchannel"
        # Fall back to a random valid block when none (or an out-of-range
        # value) was supplied.
        if self.data_block is None or self.data_block > self.max_block_count:
            self.data_block = random.randint(1, self.max_block_count)
            logging.warning("data_block is None or over max_block_count\nwill run with block {} by random".
                            format(self.data_block))

        ssDataList = GetData.get_orial_data_block(self.data_block)

        urlParams = {
            "cmd": "search_author",  # fixed value required by the endpoint
            "author": None,      # scholar name
            "affiliate": None,   # school / institution
            "curPageNum": None,  # page number, from 1 up to the maximum
        }
        # Each scholar gets its own cookie jar (keyed by index) so that the
        # search -> profile -> co-affiliate request chain keeps one session.
        for i, ssData in enumerate(ssDataList):
            urlParams["author"] = ssData["scholar"].encode('utf-8')
            urlParams["affiliate"] = ssData["school"].encode('utf-8')
            urlParams["curPageNum"] = 1
            url = search_base_url + "?" + urlencode(urlParams)
            yield scrapy.Request(url=url, callback=self.get_basic_scholar_info,
                                 meta={'ssData': ssData, 'cookiejar': i})

    def get_basic_scholar_info(self, response):
        """Parse the author-search JSON response and follow the profile link
        of the hit that matches the expected scholar.

        A hit matches when the name is identical AND the affiliation either
        starts with the expected school, or is >0.9 similar to
        school+major, or the research field is >0.5 similar to the major.
        Logs an error when no hit matches and the page is not an explicit
        empty-result page.
        """
        item = ScholarItem()
        # NOTE: the deprecated ``encoding=`` kwarg was dropped; response.text
        # is already unicode, so it was a no-op.
        responseJson = json.loads(response.text)
        htmlData = responseJson['htmldata']
        bs = BeautifulSoup(htmlData, "lxml")
        resultItemList = bs.select(".searchResultItem")
        ssData = response.meta['ssData']
        getResult = False
        for resultItem in resultItemList:
            personA = resultItem.find(class_="personName")
            # Extract name, institution and research field first, then
            # compare against the expected scholar / school / major.
            item['personName'] = personA.text
            item['institution'] = resultItem.find(class_="personInstitution").text

            item['aFiled'] = resultItem.find(class_="aFiled")
            if item['aFiled'] is not None:
                item['aFiled'] = item['aFiled'].text

            # Fuzzy string matching; thresholds are empirical and may need
            # further tuning.
            schoolInclude = item['institution'].startswith(ssData['school'])
            schoolDiff = difflib.SequenceMatcher(None, item['institution'], ssData['school'] + ssData['major']).ratio()
            majorDiff = 0.0
            if item['aFiled'] is not None:
                majorDiff = difflib.SequenceMatcher(None, ssData['major'], item['aFiled']).ratio()
            if item['personName'] == ssData['scholar'] and (schoolInclude or schoolDiff > 0.9 or majorDiff > 0.5):
                getResult = True
                item['scholarNo'] = ssData['scholarNo']
                item['personLink'] = self.base_url + personA.attrs['href']
                # The trailing URL path segment is the scholar's entity id.
                item['entityId'] = str(item['personLink']).split('/')[-1]
                item['articleNum'] = resultItem.find(class_="articleNum")
                if item['articleNum'] is not None:
                    item['articleNum'] = item['articleNum'].text
                item['quoteNum'] = resultItem.find(class_="quoteNum")
                if item['quoteNum'] is not None:
                    item['quoteNum'] = item['quoteNum'].text
                yield scrapy.Request(url=item['personLink'], callback=self.get_more_scholar_info,
                                     meta={'scholarInfo': item, 'cookiejar': response.meta['cookiejar']})
                break
        if not getResult and htmlData.find(u"未检索到任何结果") == -1:
            logging.error(
                u"cant find {} from {} byId {}".format(ssData['scholar'], ssData['school'], ssData['scholarNo']))

    # Co-affiliate lookup endpoint used below:
    #   POST http://xueshu.baidu.com/usercenter/data/author
    #   cmd       = show_co_affiliate
    #   entity_id = 69e4836a316ffcfe36bdbafa8bff92c1 (example)

    def get_more_scholar_info(self, response):
        """Scrape metrics from the scholar's profile page, then request the
        co-affiliate list.

        Populates both the legacy ``HIndex``/``GIndex`` keys (taken from the
        achievement counters) and ``hIndex``/``gIndex`` (taken by xpath
        position) so downstream consumers of either spelling keep working.
        """
        scholarInfo = response.meta['scholarInfo']
        scholarInfo['scholarId'] = response.css(".p_scholarID_id::text").extract_first()
        resultItemList = BeautifulSoup(response.body, 'lxml').select('.content_wr')
        for resultItem in resultItemList:
            Indexs = resultItem.find_all(class_='p_ach_num')
            # find_all() returns a list (never None); guard the length so the
            # negative indexing below cannot raise IndexError.
            if len(Indexs) >= 2:
                scholarInfo['HIndex'] = Indexs[-2].text
                scholarInfo['GIndex'] = Indexs[-1].text
        urlParams = {
            "cmd": "show_co_affiliate",
            "entity_id": scholarInfo['entityId']
        }

        # extract_first() returns None instead of raising when the profile
        # layout lacks these counters.
        scholarInfo['hIndex'] = response.xpath(
            '//*[@id="author_intro_wr"]/div[2]/ul/li[3]/p[2]/text()').extract_first()
        scholarInfo['gIndex'] = response.xpath(
            '//*[@id="author_intro_wr"]/div[2]/ul/li[4]/p[2]/text()').extract_first()

        return scrapy.FormRequest(url='http://xueshu.baidu.com/usercenter/data/author',
                                  formdata=urlParams,
                                  callback=self.get_final_result,
                                  meta={'scholarInfo': scholarInfo, 'cookiejar': response.meta['cookiejar']}
                                  )

    def get_final_result(self, response):
        """Attach the co-author name list and yield the finished item."""
        scholarInfo = response.meta['scholarInfo']
        coPersonList = response.css(".co_person_name::text").extract()
        scholarInfo['coPerson'] = coPersonList
        return scholarInfo
