import scrapy
from selenium import webdriver
from selenium.webdriver import FirefoxOptions
from selenium.webdriver.common.keys import Keys
from amineruser.items import AminerItem
import xlrd
from scrapy import Request
from scrapy.http import HtmlResponse, Response
from lxml import etree
import re
import time
import pymysql
import datetime
import random

# Load the job-title filter word list (column 0 of the '职称' sheet); used by
# filter_title() to split a job title from an affiliation string.
TitleExcelFile=xlrd.open_workbook(r'titlekeywords.xlsx')
sheet=TitleExcelFile.sheet_by_name('职称')
author_title_keywords=sheet.col_values(0)
# Load the search-keyword strings (column 0) and their ids (column 1); the
# target site serves two list layouts, which parse_type distinguishes at runtime.
ExcelFile=xlrd.open_workbook(r'keywords.xlsx')
sheet=ExcelFile.sheet_by_name('关键词')
#cols_index=sheet.col_values(5)
dict_keywords=sheet.col_values(0)
ids_keywords=sheet.col_values(1)
def have_chinese(uchar):
    """Return True if *uchar* contains at least one CJK character.

    :param uchar: string to inspect
    :return: bool -- True when any character falls in U+4E00..U+9FA5
    """
    # re caches compiled patterns internally, so searching directly is fine;
    # the verbose if/else-over-a-match of the original collapsed to one test.
    return re.search(u'[\u4e00-\u9fa5]', uchar) is not None

def is_chinese(uchar):
    """Return True when the first character of *uchar* is a CJK character,
    '<' or '《'; False for empty or non-indexable input.

    The original swallowed every exception with a bare ``pass`` and left a
    debug ``print`` in place; this version guards only the indexing that can
    actually fail (empty string, None, non-sequence) and drops the print.

    :param uchar: value whose first character is classified
    :return: bool
    """
    try:
        first = uchar[0]
    except (TypeError, IndexError):
        # empty/None/non-indexable input -> treated as "not Chinese",
        # matching the original's fall-through behavior
        return False
    return u'\u4e00' <= first <= u'\u9fa5' or first == '<' or first == '《'
def filter_title(str):
    """Split a combined job-title/affiliation string into (title, organization).

    Scans the input for any known job-title keyword from the module-level
    ``author_title_keywords`` table; when one matches, every occurrence of it
    is removed and returned separately as the title.

    :param str: raw affiliation string (parameter name kept for backward
        compatibility even though it shadows the builtin)
    :return: tuple ``(title_or_None, organization)``
    """
    text = str  # local alias so we never call methods on the shadowed builtin
    for author_title in author_title_keywords:  # filter via the keyword table
        if author_title in text:
            # Drop every occurrence of the title keyword, then strip any mix
            # of leading spaces/slashes. The original chained single-char
            # lstrip calls in a fixed order, which could leave residue such
            # as "/ /x"; a char-set lstrip removes them all.
            organization = ''.join(text.split(author_title)).lstrip(' /')
            return author_title, organization
    # No title keyword matched: clean leading spaces/slashes/dots only.
    return None, text.lstrip(' /.')

def nation_title(str):
    """Map an organization string to a canonical country name.

    The last comma-separated token of *str* is matched against each line of
    ``nationkeywords.txt``; each line is a comma-separated synonym list whose
    first entry is the canonical country name.

    :param str: organization string, or None (parameter name kept for
        backward compatibility even though it shadows the builtin)
    :return: canonical country name, or None when no input / no match
    """
    if str is None:
        return None
    organization_nation = str.split(',')[-1].lstrip(' ')
    # A context manager guarantees the file is closed even if matching raises,
    # unlike the original explicit open()/close() pair.
    with open("nationkeywords.txt") as f:
        for line in f:
            line_split = line.split(',')
            for words in line_split:
                if organization_nation in words:
                    return line_split[0]
    return None


class AminerSpider(scrapy.Spider):
    """
    Scrapy alone only fetches static pages; this site renders its results with
    JS, so Selenium is combined with Scrapy to obtain dynamically loaded data.

    How is a url fetched via Selenium instead of Scrapy's Downloader?
    Approach: while the Request passes through the downloader middleware, the
    middleware drives Selenium to load the url, takes the rendered page source
    and returns it wrapped in a Response object handed straight to
    process_response() and then to the engine. The remaining middlewares'
    process_request() calls and the Downloader itself are effectively skipped.
    """
    name = 'aminer'
    allowed_domains = ['aminer.cn']
    #start_urls = ["https://www.aminer.cn/search/pub?t=b&q=载人潜水器&forceType=false"]
    #url0='https://www.aminer.cn/search/pub?t=b&q=水下照明&forceType=false'
    #url1 = 'https://www.aminer.cn/search/pub?t=b&q=水下照明&forceType=false'
    def __init__(self):
        # Create the headless Firefox driver and the MySQL connection once,
        # when the spider object is instantiated.
        super(AminerSpider, self).__init__(name='aminer')
        option = FirefoxOptions()
        option.headless = True
        self.driver = webdriver.Firefox(options=option)
        # NOTE(review): DB credentials are hard-coded here — consider moving
        # them into Scrapy settings or environment variables.
        self.conn = pymysql.connect(host="rm-bp151dcc75aycqd80to.mysql.rds.aliyuncs.com", user="root", passwd="Iscas123",
                               db="knowledgeproject_new", port=3306, charset='utf8')
    def start_requests(self):
        """Yield one search-result Request per keyword loaded from keywords.xlsx."""
        #urls = 'https://www.aminer.cn/search/pub?t=b&q='+'海龙'+ '&forceType=false'
        # NOTE(review): id_keywords is a module-level global that parse_page
        # reads later; with Scrapy's concurrent scheduling it may not match
        # the keyword of the response actually being parsed — verify.
        global id_keywords
        for i in range(len(dict_keywords)):
            keywords=dict_keywords[i]
            id_keywords=ids_keywords[i]
            urls='https://www.aminer.cn/search/pub?t=b&q='+keywords+'&forceType=false'
            yield Request(url=urls, callback=self.parse_type)
        """
        for keywords in dict_keywords:
            urls='https://www.aminer.cn/search/pub?t=b&q='+keywords+'&forceType=false'
            yield Request(url=urls, callback=self.parse_type)
            """


    def parse_type(self, response):
        """
        Collect detail-page urls from a keyword's result list (first 10 pages,
        paginated through the Selenium-driven browser) and yield a Request for
        each, handled by parse_page.
        :param response: result-list page for one keyword
        :return: generator of Requests
        """
        # Probe an element that only exists in one of the two list layouts;
        # an empty result selects the first branch (presumably one of two
        # site templates — TODO confirm which layout each xpath targets).
        judge=response.xpath('/html/body/div/section/main/article/div/div[3]/div[2]/div[2]/div[1]/div[4]/div[1]/div[1]/div/div[2]/div[1]/span/span').extract_first('')
        if judge=="":
        #print('Crawling',response.url,'\n')
            current_page=1# current page number within the search results
            res=response
            print('当前第', current_page, '页\n')
            info_divs = res.xpath("//div[@class='paper-item']")
            urls_page=[]
            for div in info_divs:
                # Collect the detail-page url; first/second author, title,
                # abstract, link and year are parsed later in parse_page.
                urls_page.append('https://www.aminer.cn' + (div.xpath('./div[1]/p/a/@href')).extract_first(''))
            time.sleep(random.randrange(3,5))
            input = self.driver.find_element_by_xpath(
                "//*[@id='root']/section/main/article/div/div[3]/div[2]/div[2]/div[1]/div[4]/div[2]/ul/li[10]/div//input")
            # input =self.driver.find_element_by_xpath(" //*[@class='ant-pagination-options']/div[1]/input[1]")
            current_page = current_page + 1
            input.send_keys(str(current_page))  # current_page is the page number to jump to
            input.send_keys(Keys.ENTER)
            time.sleep(random.randrange(3,5))
            # print(self.driver.page_source)
            res = etree.HTML(self.driver.page_source)
            while(current_page<11):
                print('当前第',current_page,'页\n')
                info_divs = res.xpath("//div[@class='paper-item']")
                for div in info_divs:
                    urls_page.append('https://www.aminer.cn' + str(div.xpath('./div[1]/p/a/@href')).split("'")[1])
                time.sleep(3)
                # Paginate by typing the next page number into the pager input
                # of the simulated browser and pressing ENTER.
                input =self.driver.find_element_by_xpath(
                        "//*[@id='root']/section/main/article/div/div[3]/div[2]/div[2]/div[1]/div[4]/div[2]/ul//input")
                #input =self.driver.find_element_by_xpath(" //*[@class='ant-pagination-options']/div[1]/input[1]")
                current_page=current_page+1
                input.send_keys(str(current_page))  # current_page is the page number to jump to
                input.send_keys(Keys.ENTER)
                time.sleep(3)
                #print(self.driver.page_source)
                res=etree.HTML(self.driver.page_source)
            for url_page in urls_page:
                yield Request(url=url_page, callback=self.parse_page)
        else:
            # Second layout: same pagination logic, but detail links live
            # under ./div[2] instead of ./div[1].
            current_page = 1  # current page number within the search results
            res = response
            print('当前第', current_page, '页\n')
            info_divs = res.xpath("//div[@class='paper-item']")
            urls_page = []
            for div in info_divs:
                # Collect the detail-page url; the rest is parsed in parse_page.
                urls_page.append('https://www.aminer.cn' + (div.xpath('./div[2]/p/a/@href')).extract_first(''))
            time.sleep(3)
            input = self.driver.find_element_by_xpath(
                "//*[@id='root']/section/main/article/div/div[3]/div[2]/div[2]/div[1]/div[4]/div[2]/ul/li[10]/div//input")
            # input =self.driver.find_element_by_xpath(" //*[@class='ant-pagination-options']/div[1]/input[1]")
            current_page = current_page + 1
            input.send_keys(str(current_page))  # current_page is the page number to jump to
            input.send_keys(Keys.ENTER)
            time.sleep(3)
            # print(self.driver.page_source)
            res = etree.HTML(self.driver.page_source)
            while (current_page < 11):
                print('当前第', current_page, '页\n')
                info_divs = res.xpath("//div[@class='paper-item']")
                for div in info_divs:
                    urls_page.append('https://www.aminer.cn' + str(div.xpath('./div[2]/p/a/@href')).split("'")[1])
                time.sleep(3)
                # Paginate by typing the next page number into the pager input.
                input = self.driver.find_element_by_xpath(
                    "//*[@id='root']/section/main/article/div/div[3]/div[2]/div[2]/div[1]/div[4]/div[2]/ul//input")
                # input =self.driver.find_element_by_xpath(" //*[@class='ant-pagination-options']/div[1]/input[1]")
                current_page = current_page + 1
                input.send_keys(str(current_page))  # current_page is the page number to jump to
                input.send_keys(Keys.ENTER)
                time.sleep(3)
                # print(self.driver.page_source)
                res = etree.HTML(self.driver.page_source)

            for url_page in urls_page:
                yield Request(url=url_page, callback=self.parse_page)
        print(urls_page)


    def parse_page(self,response):
        """
        Parse one paper detail page into an AminerItem, then insert/link
        paper, author, organization and keyword rows in MySQL.
        :param response: paper detail page
        """
        res=response
        item=AminerItem()
        print("page爬取")
        item["type"]='article'
        # Paper fields: title, abstract, venue, year, start/end page,
        # paper id, pdf link, citation count.
        title=res.xpath("//*[@id='root']/section/main/article/section[1]/h1/span/text()").extract_first('')
        if (have_chinese(title) == True):
            item["title_zh"] = title
            item["title_en"] = None
            item["language"]='zh'
        else:
            item["title_en"] = title
            item["title_zh"] = None
            item["language"]='en'
        try:
            abstract=res.xpath("//*[@id='root']/section/main/article/section[1]/div[1]/div/text()").extract_first('')
            if (have_chinese(abstract) == True):
                item["abstract_zh"] = abstract
                item["abstract_en"] = None
            else:
                item["abstract_en"] = abstract
                item["abstract_zh"] = None
        except Exception as e:
            item["abstract_zh"]=None
            item["abstract_en"] = None
        item["journal_conf_en"]=None
        item["journal_conf"]=None
        item["page_start"]=None
        item["page_end"]=None
        item["paper_year"]=None
        try:
            # publish_info is "venue, pages, year" or "venue, year"
            publish_info=res.xpath("//*[@id='root']/section/main/article/section[2]/div[1]/div[2]/div/div/p/text()").extract_first('')
            journal_conf=publish_info.split(',')[0]
            if (have_chinese(journal_conf) == True):
                item["journal_conf"] = journal_conf
            else:
                item["journal_conf_en"] = journal_conf
            if(len(publish_info.split(','))==3):
                page_info=publish_info.split(',')[1]
                item["page_start"]=page_info.split('-')[0]
                item["page_end"] = page_info.split('-')[1]
                item["paper_year"]=publish_info.split(',')[-1]
            elif(len(publish_info.split(','))==2):
                item["paper_year"]=publish_info.split(',')[-1]
        except Exception as e:
            print("page_information Error")
        item["paper_id"]=res.url.split('/')[-1]
        item["pdf"]='https://www.aminer.cn/archive/'+item["paper_id"]
        item["cited_number"]=None
        try:
            item["cited_number"]=res.xpath('//*[@id="root"]/section/main/article/section[2]/div[1]/div[2]/div/div/div[3]/span[1]/strong/text()').extract_first('')
        except Exception as e:
            print("cited ERROR")

        # Author fields: name, h-index, paper count, citation count,
        # zh/en names, author link, author id.
        item["author_en"]=None
        item["author_zh"]=None
        item["author_affi_en"]=None
        item["author_affi_zh"]=None
        item["author_link"] = None
        item["author_id"] = None
        print(res.xpath("//*[@id='root']/section/main/article/section[2]/div[5]/div[2]/div/div/div/div[1]/div[2]//text()").extract())
        author_info_split = res.xpath("//*[@id='root']/section/main/article/section[2]/div[5]/div[2]/div/div/div/div[1]/div[2]//text()").extract()
        author_info=' '.join(author_info_split)
        try:
            name=author_info_split[0]
            # A name like "John Smith (张三)" carries the zh name in parens.
            if('(') in name:
                item["author_en"] = name.split('(')[0]
                item["author_zh"]=str(re.findall(r'[(](.*?)[)]', author_info)[0])
                print(name,author_info)
            else:
                item["author_zh"]=name
                # NOTE(review): "zuthor_en" looks like a typo for "author_en" —
                # verify against the AminerItem field names.
                item["zuthor_en"]=None
        except Exception as e:
            print('author name ERROR')
        try:
            item["author_id"]=res.xpath('//*[@id="root"]/section/main/article/section[2]/div[5]/div[2]/div/div/div/div//@href').extract_first().split('/')[-1]
            item["author_link"]='https://www.aminer.cn'+res.xpath('//*[@id="root"]/section/main/article/section[2]/div[5]/div[2]/div/div/div/div//@href').extract_first()
            # NOTE(review): self-assignment below has no effect.
            item["author_id"]=item["author_id"]
        except Exception as e:
            print("author_link ERROR")
        try:
            # The three numbers in the author blurb are h-index, #papers, #citations.
            author_nums = re.findall("\d+", author_info)
            item["h_index"]=author_nums[0]
            item["paper_num"]=author_nums[1]
            item["author_citation"]=author_nums[2]
        except Exception as e:
            print('author-index ERROR')
            item["h_index"]=None
            item["paper_num"]=None
            item["author_citation"]=None

        # Organization field: the author's affiliation name.
        item["organization"]=None
        try:
            # NOTE(review): str.strip(chars) removes a *character set* from the
            # ends, not the substring — verify this matches the intent here.
            organization=re.split('([0-9])\s*', author_info.strip(item["author_zh"]))[-1]
            organization=organization.strip(author_info_split[0])
            if(len(organization)<=1):
                pass
            else:
                item["organization"]=organization
                (item["author_title"], item["organization"]) = filter_title(item["organization"])# split job title from affiliation
        except Exception as e:
            print("organization ERROR")
        print(item["organization"])

        # Write the paper record to the database.
        # NOTE(review): all SQL below is built by string interpolation, which
        # is injection-prone and stores Python None as the string "None" —
        # consider parameterized queries (cursor.execute(sql, params)).
        paper_insert_id=None# primary-key id of the inserted paper row
        self.cursor = self.conn.cursor()
        sqlExit_zh = "SELECT title_zh FROM cra_paper  WHERE paper_local_id = '%s'" % (item["paper_id"])
        res = self.conn.cursor().execute(sqlExit_zh)
        if(res>=1):
            print("数据存在")
            sqlExit_zh = "SELECT id FROM cra_paper  WHERE paper_local_id = '%s'" % (item["paper_id"])
            res = self.cursor.execute(sqlExit_zh)
            paper_insert_id = (self.cursor.fetchone())[0]
        else:
            try:  # (writes were temporarily disabled here during development)
                dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                sql = 'insert into cra_paper(title_zh,title_en,abstract_en,abstract_zh,authors_zh,authors_en,page_start,page_end,journal_conf,journal_conf_en,cited_number,paper_year,type,pdf,create_at,update_at,delete_at,is_deleted,language,paper_local_id,author_link) ' \
                      ' values("%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s")' \
                      % (item["title_zh"], item["title_en"], item["abstract_en"], item["abstract_zh"], item["author_zh"],
                         item["author_en"], item["page_start"], item["page_end"], item["journal_conf"],
                         item["journal_conf_en"], item["cited_number"], item["paper_year"], item["type"],
                         item["pdf"], dt, None, None, 0, item["language"],item["paper_id"],item["author_link"])
                self.conn.query(sql)
                paper_insert_id = int(self.conn.insert_id())
                self.conn.commit()
            except Exception as e:
                print("PYMSQL PAPER ERROR")

        # Write the author record.
        self.cursor = self.conn.cursor()
        sqlExit = "SELECT name FROM cra_author  WHERE name = '%s'" % (item["author_zh"])
        res = self.conn.cursor().execute(sqlExit)
        author_rel_id=None# primary-key id of the inserted author row
        if (res):  # res is the matched row count; >0 means the record already exists
            sqlExit="SELECT id FROM cra_author  WHERE name = '%s'" % (item["author_zh"])
            self.cursor.execute(sqlExit)
            print("数据已存在", res)
            author_rel_id=(self.cursor.fetchone())[0]
        else:
            if(item["author_zh"]==None):
                pass
            else:
                try:  # (writes were temporarily disabled here during development)
                    dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    sql = 'insert into cra_author(name,num_pubs,num_citation,h_index,title,create_at)' \
                          ' values("%s","%s","%s","%s","%s","%s")' \
                          % (
                          item["author_zh"],item["paper_num"],item["author_citation"],item["h_index"],item["author_title"],dt
                            )
                    self.conn.query(sql)
                    author_rel_id=int(self.conn.insert_id())
                    self.conn.commit()
                except Exception as e:
                    print("PYMSQL AUTHOR ERROR")
        # Write the organization record.
        if(item["organization"] is None):
            pass
        else:
            item["organization"]=item["organization"].lstrip(' ')
        item["nation"]=nation_title(item["organization"])
        organization_rel_id=None# primary-key id of the inserted organization row
        self.cursor = self.conn.cursor()
        sqlExit_en = "SELECT name FROM cra_organization  WHERE name = '%s'" % (item["organization"])
        res = self.conn.cursor().execute(sqlExit_en)
        if (res >= 1):
            sqlExit = "SELECT id FROM cra_organization  WHERE name = '%s'" % (item["organization"])
            self.cursor.execute(sqlExit)
            organization_rel_id = (self.cursor.fetchone())[0]
        sqlExit_zh = "SELECT id FROM cra_organization  WHERE name_en = '%s'" % (item["organization"])
        res0 = self.conn.cursor().execute(sqlExit_zh)
        if (res0 >= 1):
            sqlExit_zh = "SELECT id FROM cra_organization  WHERE name_en = '%s'" % (item["organization"])
            # NOTE(review): this executes sqlExit (the *name* query, possibly a
            # stale value from an earlier branch) instead of sqlExit_zh — this
            # looks like a bug; verify which query was intended.
            self.cursor.execute(sqlExit)
            organization_rel_id = (self.cursor.fetchone())[0]
        if (res>=1 or res0>=1):  # matched row count > 0 means the record already exists
            print("数据存在")
        else:
            if(item["organization"]==None):
                pass
            else:
            #try:  # (writes were temporarily disabled here during development)
                dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                if(have_chinese(item["organization"])==True):
                    sql = 'insert into cra_organization(name,nation,create_at)' \
                          ' values("%s","%s","%s")' \
                          % (item["organization"],item["nation"],dt)
                else:
                    sql = 'insert into cra_organization(name_en,nation,create_at)' \
                          ' values("%s","%s","%s")' \
                          % (item["organization"],item["nation"],dt)
                self.conn.query(sql)
                organization_rel_id = int(self.conn.insert_id())
                self.conn.commit()
            #except Exception as e:
                #print("PYMSQL ORGANIZATION ERROR")
        print("PAPER,AUTHOR,ORGANIZATION,ID为,", paper_insert_id, author_rel_id, organization_rel_id)

        # Relation-linking module: connect the rows inserted above.
        print("ID为,",paper_insert_id,author_rel_id,organization_rel_id)
        ## link the author to the paper
        # NOTE(review): '&' happens to work on bools, but 'and' is the
        # intended logical operator throughout these guards.
        if((author_rel_id is not None)& (paper_insert_id is not None) ):
            self.cursor = self.conn.cursor()
            sqlExit = "SELECT author_id FROM rel_author_paper  WHERE paper_id = '%s'" % (paper_insert_id)
            res = self.conn.cursor().execute(sqlExit)
            if(res>=1):
                print("数据存在")
            else:
                try:  # (writes were temporarily disabled here during development)
                    dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    sql = 'insert into rel_author_paper(author_id,paper_id,create_at)' \
                          ' values("%s","%s","%s")' \
                          % (
                              author_rel_id,paper_insert_id, dt
                          )
                    self.conn.query(sql)
                    self.conn.commit()
                except Exception as e:
                    print("PYMSQL AUTHOR-PAPER-REL ERROR")

        ## link the organization to the author
        if((author_rel_id is not None) &( organization_rel_id is not None)):
            if(organization_rel_id>=1):
                self.cursor = self.conn.cursor()
                sqlExit = "SELECT org_id FROM rel_author_org  WHERE author_id = '%s'" % (author_rel_id)
                res = self.conn.cursor().execute(sqlExit)
                if(res>=1):
                    print("数据存在")
                else:
                    try:  # (writes were temporarily disabled here during development)
                        dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                        sql = 'insert into rel_author_org(author_id,org_id,is_current,create_at)' \
                              ' values("%s","%s","%s","%s")' \
                              % (
                                  author_rel_id,organization_rel_id,1, dt
                              )
                        self.conn.query(sql)
                        self.conn.commit()
                    except Exception as e:
                        print("PYMSQL AUTHOR-ORG-REL ERROR")

        ## link the paper to the search keyword (id_keywords is the module
        ## global set in start_requests — see the note there about races)
        if((paper_insert_id is not None)and(id_keywords is not None)):
            self.cursor = self.conn.cursor()
            sqlExit = "SELECT * FROM rel_article_keywords  WHERE (article_id= '%s'and keyword_id= '%s')" % (paper_insert_id,id_keywords)
            res = self.conn.cursor().execute(sqlExit)
            if(res>=1):
                print("数据存在")
            else:
                try:  # (writes were temporarily disabled here during development)
                    dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    sql = 'insert into rel_article_keywords(article_id,keyword_id,create_at,type)' \
                          ' values("%s","%s","%s",1)' \
                          % (
                              paper_insert_id, id_keywords,dt
                          )
                    self.conn.query(sql)
                    self.conn.commit()
                except Exception as e:
                    print("PYMSQL ARTICLE-KEYWORDS ERROR")











