# -*- coding: utf-8 -*-
import scrapy
import datetime
import re
import time
import random
from PIL import Image
import traceback
from crawl.items import Patentitem, Gongsiitem, Authoritem, Author_gongsi, Author_patent

# Selenium-related imports
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys

# Scrapy signal-related imports
from scrapy.utils.project import get_project_settings
from scrapy import signals

from pydispatch import dispatcher

class OrbitSpider(scrapy.spiders.CrawlSpider):
    """Spider that logs into orbit.com with Selenium and, for each keyword in
    ``keywords.txt``, walks the search-result pages yielding patent, company,
    author and relation items.

    Scrapy only issues the initial request; everything after that is driven
    by the shared Selenium Chrome instance inside :meth:`parse`.
    """

    name = 'orbit'
    allowed_domains = ['orbit.com', 'asia-orbit.com']
    start_urls = ['http://www.orbit.com/']
    has_login = False
    # Stop paging through a keyword's results once a hit's relevance score
    # (percent) drops below this threshold.
    relative_standard = 95

    # Browser-like User-Agent used when scrapy itself makes requests.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
    }

    def __init__(self):
        # CrawlSpider needs its own initialisation (rule compilation etc.).
        super().__init__()

        # Load the search keywords, one per line; ignore blank lines.
        with open('keywords.txt', 'r', encoding='UTF-8') as f:
            self.keywords = [kw for kw in f.read().splitlines() if kw.strip()]

        # NOTE(review): ``executable_path`` is deprecated in Selenium 4 —
        # switch to a Service object when the Selenium dependency is bumped.
        driver = r"chromedriver.exe"
        self.browser = webdriver.Chrome(executable_path=driver)
        self.wait = WebDriverWait(self.browser, 60)
        self.browser.set_page_load_timeout(60)

        # Make sure Chrome is shut down when the spider closes.
        dispatcher.connect(receiver=self.close_handle,
                           signal=signals.spider_closed)

    def close_handle(self, spider):
        """Signal handler for ``spider_closed``: quit the Chrome browser."""
        print("mySpiderCloseHandle: enter ")
        self.browser.quit()

    def isEmpty(self):
        """Return ``True`` when the current search produced no result rows.

        Presence of any '%' relevance cell inside the patent list view is
        used as the "has results" marker.
        """
        try:
            self.browser.find_element_by_xpath(
                "//*[@id='patent-list-view']//*[contains(text(), '%')]")
        except Exception:
            return True
        return False

    def parse(self, response):
        """Log in, then run one search per keyword and scrape every result
        whose relevance score is at least :attr:`relative_standard`.

        Yields ``Patentitem``, ``Gongsiitem``, ``Authoritem``,
        ``Author_gongsi`` and ``Author_patent`` items.
        """
        print('start orbit crawler')

        print("login...")
        origin_url = "http://www.orbit.com/?locale=zh&embedded=false"
        try:
            self.browser.get(origin_url)
            # The login form sometimes has to be submitted twice before the
            # session sticks, hence the two passes.
            for i in 1, 2:
                # TODO(review): credentials are hard-coded; move them into
                # the project settings or environment variables.
                email_input = self.wait.until(
                    EC.presence_of_element_located((By.ID, "email"))
                )
                time.sleep(1)
                email_input.clear()
                email_input.send_keys("qprqt001")

                password_input = self.browser.find_element_by_id('password')
                time.sleep(1)
                password_input.clear()
                password_input.send_keys("sdfrty74")

                login_button = self.browser.find_element_by_id('login')
                time.sleep(1)
                login_button.click()

            print("url : ------ " + self.browser.current_url)

            # Dismiss the confirmation popup ("是" = yes) if it appears.
            result = self.wait.until(
                EC.presence_of_element_located((By.XPATH, "//button[text()='是']"))
            )
            result.click()

            for kw in self.keywords:
                search = self.wait.until(
                    EC.presence_of_element_located(
                        (By.XPATH, "//textarea[@id='freeTextArea-input']"))
                )
                click = self.browser.find_element_by_xpath(
                    "//button[contains(text(),'检索')]")
                print(kw)

                time.sleep(2)
                print("search...")

                self.browser.execute_script("arguments[0].focus();", search)
                time.sleep(2)

                # Fill the search box via JS; send_keys is unreliable here.
                self.browser.execute_script(
                    "arguments[0].value='{}'".format(kw), search)
                time.sleep(2)
                click.click()
                time.sleep(1)

                # Flips to False once a hit's relevance drops below the
                # threshold, ending the paging loop for this keyword.
                relative = True
                # Result-list page loop.
                while relative:
                    time.sleep(20)

                    # No hits at all for this keyword: skip it.
                    if self.isEmpty():
                        break
                    # Wait for the "showing results X - Y" banner to render.
                    self.wait.until(
                        EC.text_to_be_present_in_element(
                            (By.XPATH, "//div[contains(text(), '正在显示结果')]"), '-')
                    )
                    count_text = self.browser.find_element_by_xpath(
                        "//div[contains(text(), '正在显示结果')]").text
                    result = re.findall(r'.* (\d+) - (\d+) .*', count_text)
                    # Number of result rows on the current page.
                    length = int(result[0][1]) - int(result[0][0]) + 1

                    for i in range(0, length):
                        print(int(result[0][0]) + i)
                        item = Patentitem()
                        gongsi_item = Gongsiitem()

                        # Title link of the i-th row; clicking it opens the
                        # patent detail view.
                        atc_path = "//div[@id='questelTableViewItem_{}']//td[@fieldname='MyTI']//a".format(i)
                        article = self.wait.until(
                            EC.element_to_be_clickable((By.XPATH, atc_path))
                        )
                        title = article.text
                        # Publication (grant) number.
                        pub_number_path = "//div[@id='questelTableViewItem_{}']//td[@fieldname='MyPN']".format(i)
                        pub_number = self.browser.find_element_by_xpath(pub_number_path).text
                        # Relevance score cell, e.g. "97 %".
                        relative_per_path = "//div[@id='questelTableViewItem_{}']//td[@fieldname='RS']".format(i)
                        relative_per = self.browser.find_element_by_xpath(relative_per_path).text.strip(" %")
                        print(int(relative_per), self.relative_standard)
                        if int(relative_per) < self.relative_standard:
                            relative = False
                            break

                        item['title'] = title
                        item['pub_number'] = pub_number
                        print(title, 'title')
                        print(pub_number, 'pub_number')
                        # Open the patent detail page.
                        article.click()

                        # Switch to the full-text tab.
                        quanwen = self.wait.until(
                            EC.element_to_be_clickable((By.XPATH, "//li[@id='PatentCompleteRegularViewHeader']/a[2]"))
                        )
                        quanwen.click()
                        # Give the full-text view time to render.
                        time.sleep(10)
                        print('----------')

                        app_number = self.browser.find_element_by_xpath(
                            "//span[contains(text(), 'Application number')]/following-sibling::span[1]").text
                        app_time = self.browser.find_element_by_xpath(
                            "//span[contains(text(), '日期')]/following-sibling::span[1]").text
                        print(app_time)
                        legal_status_time = self.browser.find_element_by_xpath(
                            "//ul[@class='hierarchical-tree--dotted']//span[@class='gwt-InlineLabel publicationList-Publication-Date']").text

                        # Inventors / technology domain are occasionally
                        # absent; fall back to empty strings instead of
                        # aborting the whole crawl (same pattern as the
                        # assignee lookup below).
                        has_inventors = True
                        try:
                            inventors = self.browser.find_element_by_xpath(
                                "//div//td[contains(text(), 'Inventor')]/following-sibling::td").text
                        except Exception:
                            inventors = ""
                            has_inventors = False
                        try:
                            domain = self.browser.find_element_by_xpath(
                                "//div//td[contains(text(), 'Technology domain')]/following-sibling::td").text
                        except Exception:
                            domain = ""

                        print(app_number, 'app_number')
                        print(app_time, 'app_time')
                        print(legal_status_time, 'legal_status_time')
                        print(inventors, 'inventors')
                        print(domain, 'domain')

                        item['keyword'] = kw
                        item['app_number'] = app_number
                        item['app_time'] = app_time
                        item['legal_status_time'] = legal_status_time
                        item['inventors'] = inventors
                        item['domain'] = domain

                        has_assignee = True
                        assignee_split = ""
                        try:
                            assignee_ele = self.browser.find_element_by_xpath(
                                "//div//td[(contains(text(), 'Assignee')) and not (contains(text(), ' '))]/following-sibling::td")
                        except Exception:
                            has_assignee = False
                        else:
                            # Drop the leading label and any trailing
                            # "[...]" qualifier from the assignee text.
                            assignee = assignee_ele.text
                            assignee_split = assignee[14:].split('[')[0].strip()
                            gongsi_item['name'] = assignee_split

                        inventors_l = inventors.splitlines()
                        if has_assignee and len(assignee_split) > 4:
                            yield gongsi_item
                            author_org = Author_gongsi()
                            author_org['authors'] = inventors_l
                            author_org['org'] = assignee_split
                            yield author_org

                        # Author/patent relation plus one item per author.
                        if has_inventors:
                            author_patent = Author_patent()
                            author_patent['authors'] = inventors_l
                            author_patent['patent'] = title
                            yield author_patent

                            for inventor in inventors_l:
                                author_item = Authoritem()
                                author_item['name'] = inventor
                                yield author_item

                        # Legal-status tab (PatentLegalStatusViewHeader).
                        falvzhuangtai = self.browser.find_element_by_xpath(
                            "//li[@id='PatentLegalStatusViewHeader']/a[2]")
                        falvzhuangtai.click()
                        simple_status = self.wait.until(
                            EC.presence_of_element_located(
                                (By.XPATH, "//div[@id='PatentTreeGrid']//div[@class='x-grid3-row '][2]//td[2]"))
                        ).text
                        legal_status_time = self.browser.find_element_by_xpath(
                            "//div[@id='PatentTreeGrid']//div[@class='x-grid3-row '][2]//td[3]").text

                        item['simple_status'] = simple_status
                        item['legal_status_time'] = legal_status_time

                        yield item

                        # Back to the result list ("回到列表").
                        cancel_btn = self.wait.until(
                            EC.presence_of_element_located(
                                (By.XPATH, "//button[contains(text(), '回到列表')]"))
                        )
                        cancel_btn.click()

                    # Advance to the next result page, or stop when the
                    # next-page button reports itself as disabled.
                    next_page = self.wait.until(
                        EC.element_to_be_clickable(
                            (By.XPATH, "//table[@id='showNextPage']//button[@class='x-btn-text']"))
                    )
                    print(next_page.get_attribute("aria-disabled"))
                    if next_page.get_attribute("aria-disabled") != 'false':
                        print('到达最后一页')
                        # Wait for the search box to be ready again before
                        # moving on to the next keyword.
                        self.wait.until(
                            EC.element_to_be_clickable(
                                (By.XPATH, "//div[@id='comboSearchHisto']//input"))
                        )
                        break
                    next_page.click()
                # Fresh page state before the next keyword.
                self.browser.refresh()

        except Exception as e:
            print(f"chrome user login handle error, Exception = {e}")
            # traceback.print_exc() takes no exception argument; passing one
            # (the old code did) raised a TypeError and hid the real trace.
            traceback.print_exc()
        else:
            # Only quit on a clean run; spider_closed handles the rest.
            self.browser.quit()

    # Request-error callback: can be extended to write failures to a file
    # or database.
    def error_handle(self, failure):
        """Log a failed request for later inspection."""
        print(f"request error: {failure}")
