# coding=utf-8
import difflib
import json
import random
from urllib import urlencode

import scrapy
import time
from bs4 import BeautifulSoup
from scrapy.spiders import CrawlSpider

from myTest.item.PatentItem import PatentItem
from myTest.utils.getdata import GetData
import logging


class PatentSpider(CrawlSpider):
    """Crawl the CNKI patent database (kns.cnki.net) for scholar/school pairs.

    Request flow (CNKI requires the search to be registered in the session
    cookie before the result list can be fetched):

      1. ``start_requests``        -- GET request/SearchHandler.ashx to register
                                      the query parameters for one scholar.
      2. ``get_basic_patent_info`` -- GET brief/brief.aspx to open the first
                                      page of the result list.
      3. ``get_more_patent_info``  -- parse each result row into a
                                      ``PatentItem`` and follow pagination.
      4. ``get_real_link``         -- follow each row link once so the item
                                      stores the final (post-redirect) URL.

    NOTE(review): this module is Python 2 (``from urllib import urlencode``
    at module level).
    """

    name = "patentInfo"
    base_url = "http://kns.cnki.net/kns/"
    # Which block of scholar/school input data to crawl; may be supplied via
    # the spider argument (-a data_block=N) or picked at random in
    # start_requests() when missing or out of range.
    data_block = None
    max_block_count = 63

    def __init__(self, data_block=None, *a, **kw):
        super(PatentSpider, self).__init__(*a, **kw)
        if data_block is not None:
            self.data_block = int(data_block)

    # Step 1: GET http://kns.cnki.net/kns/request/SearchHandler.ashx
    # Registers the search in the session.  Query parameters as observed from
    # the site (2018-01-11):
    #   action=, NaviCode=*, ua=1.21, PageName=ASP.brief_result_aspx,
    #   DbPrefix=SCOD, DbCatalog=<patent master database>,
    #   ConfigFile=SCOD.xml, db_opt=SCOD,
    #   db_value=<Chinese patent db, foreign patent db>,
    #   txt_1_sel=SQR    txt_1_value1=<school>   (applicant field)
    #   txt_1_relation=#CNKI_AND   txt_1_special1="="
    #   txt_2_sel=FMR    txt_2_value1=<scholar>  (inventor field)
    #   txt_2_logical=and  txt_2_relation=#CNKI_AND  txt_2_special1="="
    #   his=0, __=<timestamp string>
    def start_requests(self):
        """Register one search per (scholar, school) pair in the data block.

        Each pair gets its own cookiejar (index ``i``) so the server-side
        search state does not leak between scholars.
        """
        search_base_url = self.base_url + "request/SearchHandler.ashx"
        if self.data_block is None or self.data_block > self.max_block_count:
            self.data_block = random.randint(1, self.max_block_count)
            logging.warning("data_block is None or over max_block_count\nwill run with block {} by random".
                            format(self.data_block))

        ssDataList = GetData.get_orial_data_block(self.data_block)

        urlParams = {
            "action": "",
            "NaviCode": "*",
            "ua": 1.21,
            "PageName": "ASP.brief_result_aspx",
            "DbPrefix": "SCOD",
            "DbCatalog": u"专利数据总库".encode("utf-8"),
            "ConfigFile": "SCOD.xml",
            "db_opt": "SCOD",
            "db_value": "中国专利数据库,国外专利数据库",
            "txt_1_sel": "SQR",
            "txt_1_value1": None,  # filled per pair below (school name)
            "txt_1_relation": "#CNKI_AND",
            "txt_1_special1": "=",
            "txt_2_sel": "FMR",
            "txt_2_value1": "范昊明",  # placeholder, overwritten per pair below
            "txt_2_logical": "and",
            "txt_2_relation": "#CNKI_AND",
            "txt_2_special1": "=",
            "his": 0,
            "__": "Thu Jan 11 2018 13:10:09 GMT+0800"
        }
        # IDIOM: enumerate() replaces the original manual ``i = -1; i += 1``
        # counter.
        for i, ssData in enumerate(ssDataList):
            urlParams["txt_2_value1"] = ssData["scholar"].encode('utf-8')
            urlParams["txt_1_value1"] = ssData["school"].encode('utf-8')
            urlParams["__"] = ""
            url = search_base_url + "?" + urlencode(urlParams)
            yield scrapy.Request(url=url, callback=self.get_basic_patent_info,
                                 meta={'ssData': ssData, 'cookiejar': i})

    # Step 2: GET http://kns.cnki.net/kns/brief/brief.aspx
    # Parameters: pagename=ASP.brief_result_aspx, dbPrefix=SCOD,
    #   dbCatalog=<patent master database>, ConfigFile=SCOD.xml, research=off,
    #   t=<epoch millis>, keyValue=<school>, S=1
    def get_basic_patent_info(self, response):
        """Open the first result-list page for the search registered in step 1."""
        ssData = response.meta['ssData']

        brief_url = self.base_url + "brief/brief.aspx"
        urlParams = {
            "pagename": "ASP.brief_result_aspx",
            "dbPrefix": "SCOD",
            "dbCatalog": u"专利数据总库".encode("utf-8"),
            "ConfigFile": "SCOD.xml",
            "research": "off",
            "t": int(time.time() * 1000),  # cache-busting timestamp in ms
            "keyValue": ssData["school"].encode('utf-8'),
            "S": 1
        }
        url = brief_url + "?" + urlencode(urlParams)
        yield scrapy.Request(url=url, callback=self.get_more_patent_info,
                             meta={'ssData': ssData, 'cookiejar': response.meta['cookiejar']})

    def get_more_patent_info(self, response):
        """Parse one result-list page.

        Yields one request per result row (resolved by ``get_real_link``)
        and, when a further page exists, one request for the next page.
        """
        ssData = response.meta['ssData']
        bs4 = BeautifulSoup(response.text, 'lxml')
        top = bs4.find('table', class_="GridTableContent")
        if top is None:
            # BUGFIX: the original caught AttributeError here, logged it, and
            # then fell through to ``for tag in tags`` with ``tags`` never
            # assigned, raising NameError.  Log and stop parsing this page.
            logging.error(u"访问异常，没有获取到table of {} from {}".format(ssData['scholar'], ssData['school']))
            return
        tags = top.find_all('tr')

        for tag in tags:
            # Rows carrying a class attribute are header rows -- skip them.
            if tag.has_attr('class'):
                continue
            item = PatentItem()
            patentBasicLink = "http://kns.cnki.net"
            tdList = tag.find_all("td")
            item['personName'] = ssData["scholar"]
            item['scholarNo'] = ssData['scholarNo']
            item['patentName'] = tdList[1].a.text
            # patentLink is provisional: the href redirects, so a second
            # request (get_real_link) records the final URL.
            item['patentLink'] = patentBasicLink + tdList[1].a['href']
            item['creators'] = tdList[2].get_text()
            item['appliers'] = tdList[3].get_text()
            item['dataBase'] = tdList[4].get_text()
            item['applyDate'] = tdList[5].get_text()
            item['openDate'] = tdList[6].get_text()
            yield scrapy.Request(url=item['patentLink'], callback=self.get_real_link,
                                 meta={'item': item, 'cookiejar': response.meta['cookiejar']})

        # Pagination: follow only when MORE than one "next page" anchor is
        # present -- presumably the last page still renders a single inert
        # link.  NOTE(review): verify the >1 threshold against live markup.
        nextPage = bs4.find_all("a", string=u"下一页")
        brief_url = self.base_url + "brief/brief.aspx"
        if len(nextPage) > 1:
            nextUrl = brief_url + nextPage[0].attrs['href']
            yield scrapy.Request(url=nextUrl, callback=self.get_more_patent_info,
                                 meta={'ssData': ssData, 'cookiejar': response.meta['cookiejar']})

    def get_real_link(self, response):
        """Store the post-redirect URL of the detail page and emit the item."""
        item = response.meta['item']
        item['patentLink'] = response.url
        return item


