# -*- coding: utf-8 -*-
import scrapy
import time
from selenium import webdriver
from crawl.items import Zlfitem, Paperitem
import json
import pymysql
import requests
import re
from datetime import datetime


class ZlfSpider(scrapy.Spider):
    """Spider for zlf.cqvip.com.

    Crawls either patents or conference papers for a list of keywords
    (read from keywords.txt), emits items that a pipeline writes to
    zlf.json, and — once every keyword is exhausted — bulk-loads that
    JSON-lines file into MySQL (cra_paper / cra_patent plus the
    author / organization / keyword relation tables).
    """

    name = 'zlf'
    allowed_domains = ['zlf.cqvip.com']
    start_urls = ['http://zlf.cqvip.com/zk/search.aspx']
    base_url = 'http://zlf.cqvip.com'

    header = {
        'Referer': 'http://zlf.cqvip.com/',
        'Host': 'zlf.cqvip.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'
    }

    # Crawl state: keyword list, current keyword/page, and the two
    # URL-encoded search filters (patent vs. conference paper).
    keywords = []
    cur_keyword = ""
    patent_type = "LT%3D4%23专利"
    paper_type = "LT%3D3%23会议论文"
    page = 1
    crawl_id = -1
    url = "http://zlf.cqvip.com/zk/search.aspx?from=index&key=U%3D{keyword}&ids=&&cf={type}&page={page}"

    def __init__(self):
        super().__init__()
        # Load the keyword list, one keyword per line.
        # (was: ''.join(f.readlines()).strip('') — strip('') strips the
        # empty character set, i.e. a no-op; .strip() was intended)
        with open('keywords.txt', 'r', encoding='utf-8') as f:
            self.keywords = f.read().strip().splitlines()

        # Ask the operator which record type to crawl.
        while True:
            print('please input "paper" or "patent":')
            crawl_type = input()
            if crawl_type == 'paper':
                self.type = self.paper_type
                break
            elif crawl_type == 'patent':
                self.type = self.patent_type
                break

        print('start')

    def isValidate(self, patent):
        """Return True when the scraped record has a non-empty title."""
        return patent['title'] != ''

    def normalization(self, patent):
        """Normalize the record's 'mark' dict in place.

        Strips whitespace from values and removes full-width spaces,
        NBSPs and a trailing '：' from keys.
        (was: the stripped value was written back into the OLD dict while
        the new dict kept the unstripped value — now the cleaned dict
        holds the stripped value.)
        """
        cleaned = {}
        for key, value in patent['mark'].items():
            new_key = key.strip().replace('\u3000', '').replace('\xa0', '').rstrip('：')
            cleaned[new_key] = value.strip()
        patent['mark'] = cleaned

    # ------------------------------------------------------------------
    # Persistence helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _read_json_lines(filename):
        """Read a one-JSON-object-per-line file.

        Strips full-width spaces, NBSPs, trailing commas and a possible
        UTF-8 BOM; blank lines are skipped (the original crashed on
        json.loads('') for a trailing empty line).
        """
        with open(filename, 'r', encoding='utf-8') as f:
            raw = f.readlines()
        lines = []
        for line in raw:
            line = line.strip().replace('\u3000', '').replace('\xa0', '').rstrip(',')
            if line.startswith('\ufeff'):
                line = line[1:]
            if line:
                lines.append(line)
        return lines

    @staticmethod
    def _connect():
        """Open the MySQL connection.

        NOTE(security): credentials are hardcoded here (kept from the
        original deployment); they should be moved to configuration or
        environment variables.
        """
        return pymysql.connect(
            host='rm-bp151dcc75aycqd80to.mysql.rds.aliyuncs.com',
            user='root', password='Iscas123',
            db='knowledgeproject_new', port=3306)

    @staticmethod
    def _fetch_id(cursor, table, column, value):
        """Return the id of the row in `table` where `column` == `value`,
        or None when absent.  Uses a parameterized query (the original
        interpolated scraped text straight into the SQL string)."""
        cursor.execute(
            'SELECT id FROM {} WHERE {} = %s'.format(table, column), (value,))
        row = cursor.fetchone()
        return row[0] if row else None

    @staticmethod
    def _ensure_named_row(cursor, connect, table, name, cur_time):
        """Insert (name, create_at, update_at) into `table` unless a row
        with that name already exists."""
        cursor.execute(
            'SELECT name FROM {} WHERE name = %s'.format(table), (name,))
        if not cursor.fetchall():
            cursor.execute(
                'INSERT INTO {}(name, create_at, update_at) VALUES (%s, %s, %s)'.format(table),
                (name, cur_time, cur_time))
            connect.commit()

    @staticmethod
    def _ensure_relation(cursor, connect, rel_table, left_col, right_col,
                         left_id, right_id, cur_time):
        """Insert a (left_id, right_id) link into `rel_table` unless the
        pair is already linked."""
        cursor.execute(
            'SELECT id FROM {} WHERE {} = %s AND {} = %s'.format(rel_table, left_col, right_col),
            (left_id, right_id))
        if not cursor.fetchall():
            cursor.execute(
                'INSERT INTO {}({}, {}, create_at, update_at) VALUES (%s, %s, %s, %s)'.format(
                    rel_table, left_col, right_col),
                (left_id, right_id, cur_time, cur_time))
            connect.commit()

    @staticmethod
    def _ensure_keyword_relation(cursor, connect, article_id, keyword_id):
        """Link an article/patent to a technique keyword (type=2) unless
        the link already exists."""
        cursor.execute(
            'SELECT id FROM rel_article_keywords WHERE article_id = %s AND keyword_id = %s',
            (article_id, keyword_id))
        if not cursor.fetchall():
            cursor.execute(
                'INSERT INTO rel_article_keywords(article_id, keyword_id, type) VALUES (%s, %s, 2)',
                (article_id, keyword_id))
            connect.commit()

    def _link_names(self, cursor, connect, names, name_table, rel_table,
                    rel_left_col, rel_right_col, parent_id, cur_time):
        """Split a ,;，； -joined name string, upsert each name into
        `name_table` and link it to `parent_id` through `rel_table`.

        (was: the original fetched ids with fetchone() and dereferenced
        [0] BEFORE the None check, crashing when a lookup missed)
        """
        for raw in re.split('[,;，；]', names):
            entity = raw.strip()
            if entity == '':
                continue
            self._ensure_named_row(cursor, connect, name_table, entity, cur_time)
            child_id = self._fetch_id(cursor, name_table, 'name', entity)
            if parent_id is not None and child_id is not None:
                self._ensure_relation(cursor, connect, rel_table,
                                      rel_left_col, rel_right_col,
                                      parent_id, child_id, cur_time)

    @staticmethod
    def _parse_conf_date(date):
        """Normalize a conference date string to (conf_time, paper_year).

        English dates keep their raw text; the year is the trailing four
        characters (was date.strip()[-4] — a single character, off-by-one).
        Chinese 年/月/日 dates are converted to 'YYYY-MM-DD', coercing a
        '00' day to '01'.  Unparseable dates yield ('', '').
        """
        if re.search('[a-z]', date):
            return date, date.strip()[-4:]
        date = date.replace('年', '').replace('月', '').replace('日', '').replace('-', '').strip()
        if date.endswith('00'):
            date = '01'.join(date.rsplit('00', 1))
        try:
            time_array = time.strptime(date, '%Y%m%d')
        except ValueError:
            return '', ''
        return time.strftime('%Y-%m-%d', time_array), time_array.tm_year

    def SavePaperToSql(self):
        """Load paper records from zlf.json into cra_paper, creating
        author/organization rows and the relation links as needed."""
        print('Save paper to sql')
        paper_list = self._read_json_lines('zlf.json')

        print('Connect to sql')
        connect = self._connect()
        cursor = connect.cursor()

        for paper_json in paper_list:
            cur_time = time.strftime("%Y-%m-%d %H:%M:%S")
            paper = json.loads(paper_json)
            if not self.isValidate(paper):
                continue
            print(paper)

            keyword = paper['keyword'][0]
            keywords_zh = paper['keywords'].replace('"', '').strip()
            title_zh = paper['title'].replace('"', '').strip()
            authors_zh = paper['author'].strip()
            abstract_zh = paper['abstract'].replace('"', '').strip()
            authors_affi_zh = paper['org'].replace('"', '').strip()
            journal_conf = paper['conf_name'].replace('"', '').strip()
            issn = paper['conf_paper'].replace('"', '').strip()
            conf_time, paper_year = self._parse_conf_date(paper['conf_date'])
            conf_location = paper['conf_addr'].replace('"', '').strip()
            source_id = 6
            author_link = paper['author_url'].replace('"', '').strip()
            # (was: '分类号:'.join(paper['cate']) — str.join over a string
            # interleaves the prefix between every character; plain
            # concatenation was intended)
            mark = ('分类号:' + paper['cate']).strip()

            # Insert the paper itself unless the title already exists.
            cursor.execute('SELECT title_zh FROM cra_paper WHERE title_zh = %s', (title_zh,))
            if not cursor.fetchall():
                cursor.execute(
                    'INSERT INTO cra_paper(keywords_zh, title_zh, authors_zh, abstract_zh, '
                    'authors_affi_zh, journal_conf, issn, conf_time, paper_year, conf_location, '
                    'source_id, author_link, mark, create_at, update_at) '
                    'VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',
                    (keywords_zh, title_zh, authors_zh, abstract_zh, authors_affi_zh,
                     journal_conf, issn, conf_time, paper_year, conf_location,
                     source_id, author_link, mark, cur_time, cur_time))
                connect.commit()

            # The paper id is invariant below, so fetch it once.
            paper_id = self._fetch_id(cursor, 'cra_paper', 'title_zh', title_zh)

            # Organizations and authors plus their relation rows.
            self._link_names(cursor, connect, authors_affi_zh, 'cra_organization',
                             'rel_org_paper', 'paper_id', 'org_id', paper_id, cur_time)
            self._link_names(cursor, connect, authors_zh, 'cra_author',
                             'rel_author_paper', 'paper_id', 'author_id', paper_id, cur_time)

            # Paper <-> technique keyword relation.
            keyword_id = self._fetch_id(cursor, 'technique_words', 'keyword', keyword)
            if paper_id is not None and keyword_id is not None:
                self._ensure_keyword_relation(cursor, connect, paper_id, keyword_id)

        connect.close()

    def SavePatentToSql(self):
        """Load patent records from zlf.json into cra_patent and the
        related author/organization/keyword tables."""
        print('Save patent to sql')
        patent_list = self._read_json_lines('zlf.json')

        connect = self._connect()
        cursor = connect.cursor()

        for patent_json in patent_list:
            cur_time = time.strftime("%Y-%m-%d %H:%M:%S")
            patent = json.loads(patent_json)
            self.normalization(patent)
            if not self.isValidate(patent):
                continue
            print(patent)

            info = patent['mark']
            keyword = patent['keyword'][0]
            pub_number = info['公开号'].rstrip('A')
            app_date = info['申请日'].replace('年', '').replace('月', '').replace('日', '')
            time_array = time.strptime(app_date, "%Y%m%d")
            app_time = time.strftime("%Y-%m-%d %H:%M:%S", time_array)
            app_year = time_array.tm_year
            app_number = info['申请号'].split('.')[0]
            typ = '__patent__'
            source_id = 6
            title = patent['title']
            open_date = info['公开日'].replace('年', '').replace('月', '').replace('日', '')
            time_array = time.strptime(open_date, "%Y%m%d")
            open_time = time.strftime("%Y-%m-%d %H:%M:%S", time_array)
            open_year = time_array.tm_year
            inventors = info['发明人']
            org_list = info['申请人']
            address = info['申请人地址']  # read as in the original; not persisted
            authors = info.get('代理人', '')
            ipc = info['IPC专利分类号'].split(',')[0] if 'IPC专利分类号' in info else ''
            mark = info.get('摘要', '').replace('"', '')

            # Insert the patent unless the title already exists.
            cursor.execute('SELECT title FROM cra_patent WHERE title = %s', (title,))
            if not cursor.fetchall():
                # (was: values misaligned with columns — open_time was
                # bound to the app_time column, app_time to open_year and
                # open_year to open_time; now aligned)
                cursor.execute(
                    'INSERT INTO cra_patent(pub_number, app_number, app_year, type, source_id, '
                    'title, ipc, app_time, open_year, open_time, inventors, create_at, update_at, '
                    'delete_at, is_deleted, mark) '
                    'VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',
                    (pub_number, app_number, app_year, typ, source_id, title, ipc,
                     app_time, open_year, open_time, inventors, cur_time, cur_time,
                     cur_time, 0, mark))
                connect.commit()

            patent_id = self._fetch_id(cursor, 'cra_patent', 'title', title)

            # Applicant orgs: as in the original, very short applicant
            # strings are skipped — presumably filtering truncated values;
            # TODO confirm the threshold's intent.
            if len(org_list) > 5:
                print("------org-----")
                print(org_list)
                self._link_names(cursor, connect, org_list, 'cra_organization',
                                 'rel_org_patent', 'patent_id', 'org_id',
                                 patent_id, cur_time)

            # Agents (代理人) are stored as authors.
            print("------author-----")
            print(authors)
            self._link_names(cursor, connect, authors, 'cra_author',
                             'rel_author_patent', 'patent_id', 'author_id',
                             patent_id, cur_time)

            # Patent <-> technique keyword relation.
            keyword_id = self._fetch_id(cursor, 'technique_words', 'keyword', keyword)
            if patent_id is not None and keyword_id is not None:
                self._ensure_keyword_relation(cursor, connect, patent_id, keyword_id)

        connect.close()

    def parse(self, response):
        """Walk search-result pages.

        Queues a detail request for every hit on the page, then either
        follows the next-page link or advances to the next keyword; when
        all keywords are done, flushes zlf.json into MySQL.
        """
        print('-----------------\n', self.cur_keyword, '\n', response.url, '\n----------------\n')
        hrefs = response.xpath(
            "//a[contains(@class, 'title') and contains(@class, 'btnTitle')]/@href").extract()

        for article_url in hrefs:
            url = self.base_url + article_url
            print(self.cur_keyword, url)
            time.sleep(0.2)  # throttle detail requests
            # The current keyword rides along as a custom request header.
            if self.type == self.patent_type:
                yield scrapy.Request(url, callback=self.parse_article,
                                     headers={'Connection': 'close', 'keyword': self.cur_keyword},
                                     dont_filter=True)
            elif self.type == self.paper_type:
                yield scrapy.Request(url, callback=self.parse_paper,
                                     headers={'Connection': 'close', 'keyword': self.cur_keyword},
                                     dont_filter=True)

        # (was extract()[0] — IndexError on result pages without a pager)
        pager = response.xpath("//*[@id='pageSpan']/span[2]/a[2]/text()").extract()
        next_page = pager[0].strip() if pager else ''
        if next_page == '下一页':
            print(next_page, '-------------- ', self.page)
            self.page += 1
            url = self.url.format(keyword=self.cur_keyword, type=self.type, page=self.page)
            time.sleep(0.5)
            yield scrapy.Request(url, callback=self.parse, headers={'Connection': 'close'})
        else:
            # No next page: move on to the next keyword (crawl_id starts
            # at -1, so the very first pass selects keywords[0]).
            self.crawl_id += 1
            if self.crawl_id >= len(self.keywords):
                print('crawl complete')
                if self.type == self.paper_type:
                    self.SavePaperToSql()
                else:
                    self.SavePatentToSql()
                return
            time.sleep(10)  # pause between keywords to stay polite
            self.cur_keyword = self.keywords[self.crawl_id]
            url = self.url.format(keyword=self.cur_keyword, type=self.type, page=1)
            print(url)
            self.page = 1
            yield scrapy.Request(url=url, callback=self.parse,
                                 headers={'Connection': 'close'}, dont_filter=True)

    def parse_article(self, response):
        """Parse a patent detail page into a Zlfitem."""
        print(response.url)
        item = Zlfitem()
        # The keyword was attached to the request as a custom header.
        item['keyword'] = response.request.headers.getlist('keyword')
        article = response.xpath("//div[@class='article']")

        # The title is the second text node of the <h1>.
        title = article.xpath("./div[1]/h1/text()").extract()[1].strip()
        item['title'] = title
        print(title)

        # Key/value detail rows (公开号, 申请日, ...) under the second div.
        detail_info = {}
        for detail in article.xpath("./div[2]/p"):
            key = detail.xpath("./strong/text()").extract()[0].replace(' ', '')
            value = detail.xpath("./text()").extract_first(default='').replace(' ', '')
            if value == '':
                # Value lives in a child element other than <strong>.
                value = detail.xpath(
                    "./*[name(.) != 'strong']/text()").extract_first(default='').replace(' ', '')
            detail_info[key] = value
        item['mark'] = detail_info

        yield item

    def parse_paper(self, response):
        """Parse a conference-paper detail page into a Paperitem."""
        print(response.url)
        item = Paperitem()
        # The keyword was attached to the request as a custom header.
        item['keyword'] = response.request.headers.getlist('keyword')
        article = response.xpath("//div[@class='article']")

        # The title is the second text node of the <h1>.
        title = article.xpath("./div[1]/h1/text()").extract()[1].strip()
        item['title'] = title
        print(title)

        # Each field is located by the label text inside its <strong>.
        item['author'] = ';'.join(article.xpath(
            './/strong[contains(text(), "作　　者")]/../a[@title]/text()').extract())
        item['author_url'] = ';'.join(article.xpath(
            './/strong[contains(text(), "作　　者")]/../a/b/../@href').extract())
        item['org'] = ';'.join(article.xpath(
            './/strong[contains(text(), "作者单位")]/../a[@title]/text()').extract())
        item['org_url'] = ';'.join(article.xpath(
            './/strong[contains(text(), "作者单位")]/../a/b/../@href').extract())
        item['conf_paper'] = ';'.join(article.xpath(
            './/strong[contains(text(), "会议文献")]/../text()').extract())
        item['conf_name'] = ';'.join(article.xpath(
            './/strong[contains(text(), "会议名称")]/../a/text()').extract())
        item['conf_date'] = ';'.join(article.xpath(
            './/strong[contains(text(), "会议日期")]/../text()').extract())
        item['conf_addr'] = ';'.join(article.xpath(
            './/strong[contains(text(), "会议地点")]/../text()').extract())
        item['conf_org'] = ';'.join(article.xpath(
            './/strong[contains(text(), "主办单位")]/../text()').extract())
        item['abstract'] = ';'.join(article.xpath(
            './/strong[contains(text(), "摘　　要")]/../text()').extract())
        item['keywords'] = ';'.join(article.xpath(
            './/strong[contains(text(), "关 键 词")]/../a/text()').extract())
        item['cate'] = ';'.join(article.xpath(
            './/strong[contains(text(), "分 类 号")]/../a[1]/text()').extract())

        yield item






