# coding=utf-8
import random
import re
import time

from parsel import Selector
import pymysql
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options

from connect_db import ConnectMysqldb
# Scrape detailed information of patent agency organizations (抓取代理机构详细信息)

# config = {'mysql_host': "localhost",
#           'mysql_port': 3306,
#           'mysql_user': "root",
#           'mysql_passwd': "123456",
#           'mysql_db': "big_data"
#           }

# Shared MySQL connection and cursor used by all parsing helpers below.
# NOTE(review): credentials are hard-coded; consider moving them to config.
db = pymysql.connect(
    host="localhost",
    port=3306,
    user='root',
    password='123456',
    database='big_data',
)
cur = db.cursor()

def get_chrombrowser():
    """Launch a Chrome instance configured to dodge basic automation checks.

    Returns:
        selenium.webdriver.Chrome: a ready-to-use browser instance.
    """
    opts = Options()
    opts.add_argument('--ignore-certificate-errors')  # ignore certificate errors
    # opts.add_argument('--user-data-dir=D:\caiyi\patent_spider\chromefile')  # dedicated user profile dir
    opts.add_argument("--ssl-version-max")
    # Strip the "controlled by automated software" flags Chrome would
    # otherwise expose to the page.
    opts.add_experimental_option("excludeSwitches", ["enable-automation"])
    opts.add_experimental_option('useAutomationExtension', False)
    caps = {
        "acceptInsecureCerts": True
    }
    driver_path = r'C:\Program Files\Google\Chrome\Application\chromedriver'
    browser = Chrome(
        executable_path=driver_path,
        options=opts,
        desired_capabilities=caps,
    )
    # Patch navigator.webdriver to `undefined` before any page script runs,
    # so anti-bot probes do not detect Selenium.
    browser.execute_cdp_cmd(
        "Page.addScriptToEvaluateOnNewDocument",
        {"source": """Object.defineProperty(navigator, 'webdriver', {get: () => undefined})"""},
    )
    return browser

def check_code(chrome_browser):
    """Handle captcha and server-busy interruptions on the current page.

    If the captcha message box is visible, block until the operator solves
    it and confirms on stdin.  Then keep refreshing (with a 10 s pause)
    while the site reports it is busy or errored.
    """
    page = Selector(chrome_browser.page_source)
    box_style = page.xpath('//div[@id="messagebox"]/@style').extract_first()
    if box_style == 'display: block;':
        input('请输入验证码：')
    while True:
        source = chrome_browser.page_source
        busy = ('系统繁忙，请稍后再试！' in source
                or '抱歉，系统在处理请求时发生了错误，给您带来的不便敬请谅解!' in source)
        if not busy:
            break
        print('系统繁忙，稍后自动刷新')
        time.sleep(10)
        chrome_browser.refresh()

def parse_agent_info(agent_el_str):
    """Parse one agency listing row and insert it into the `agency` table.

    Args:
        agent_el_str (str): outer HTML of a single ``<tr>`` from the
            agency listing table.

    Side effects:
        Executes an INSERT through the module-level ``cur`` and commits on
        the module-level ``db`` connection.
    """
    agent_soup = Selector(agent_el_str)
    # Listing-table column order: 1 code, 2 name (link), 3 status,
    # 4 nature, 5 age limit, 6 agent count, 7 credit rating.
    InstitutionName = agent_soup.xpath('normalize-space((//td)[2]/a/text())').extract_first()
    OrganizationCode = agent_soup.xpath(
        'normalize-space((//td)[1]/text())').extract_first()
    InstitutionalStatus = agent_soup.xpath('normalize-space((//td)[3]/text())').extract_first()
    InstitutionalNature = agent_soup.xpath('normalize-space((//td)[4]/text())').extract_first()
    AgeLimit = agent_soup.xpath('normalize-space((//td)[5]/text())').extract_first()
    CreditRating = agent_soup.xpath('normalize-space((//td)[7]/text())').extract_first()
    NumberAgents = agent_soup.xpath('normalize-space((//td)[6]/text())').extract_first()
    # Parameterized query: the previous f-string interpolation broke on
    # values containing quotes and was open to SQL injection.
    agent_sql = ("INSERT INTO agency (InstitutionName, OrganizationCode, InstitutionalStatus, "
                 "InstitutionalNature, AgeLimit, NumberAgents, CreditRating) "
                 "VALUES (%s, %s, %s, %s, %s, %s, %s)")
    values = (InstitutionName, OrganizationCode, InstitutionalStatus,
              InstitutionalNature, AgeLimit, NumberAgents, CreditRating)
    print(agent_sql, values)
    cur.execute(agent_sql, values)
    db.commit()
    print('===%s：机构基本信息入库完成===' % InstitutionName)

def parse_agent_main(chrome_browser):
    """Crawl the CNIPA agency listing page by page, storing every row.

    Args:
        chrome_browser: a live ``selenium.webdriver.Chrome`` instance.

    Raises:
        ValueError: if the total page count cannot be read from the
            "尾页" (last page) link's onclick handler.
    """
    start_url = r'http://dlgl.cnipa.gov.cn/txnqueryAgencyOrg.do'
    chrome_browser.get(start_url)
    time.sleep(random.randint(2, 3))
    # Optional province filter (left disabled):
    # chrome_browser.find_element_by_xpath('//a[text()="湖北省"]').click()
    # time.sleep(1)
    # chrome_browser.find_element_by_xpath('//button[text()="查询"]').click()
    # time.sleep(5)
    page_num = 1
    # onclick looks like "pageSkip('123')"; extract the number with a regex
    # instead of the fragile chained str.replace parsing.
    max_page_str = chrome_browser.find_element_by_xpath('//a[text()="尾页"]').get_attribute('onclick')
    match = re.search(r'\d+', max_page_str or '')
    if match is None:
        raise ValueError('cannot parse page count from onclick: %r' % max_page_str)
    max_page = int(match.group())
    print(f'max_page:{max_page}')
    while page_num <= max_page:
        print('--------开始抓取第%s页信息--------' % page_num)
        check_code(chrome_browser)
        # Collect every agency row on the current page.
        page_soup = Selector(chrome_browser.page_source)
        agent_el_str_list = page_soup.xpath('//table[@class="agency-tlist"]/tbody/tr').getall()
        for agent_el_str in agent_el_str_list:
            parse_agent_info(agent_el_str)
        print('@@第%s页信息抓取完成@@' % page_num)
        # Only advance when another page remains: clicking "下一页" on the
        # last page raises NoSuchElementException and aborted the run.
        if page_num < max_page:
            chrome_browser.find_element_by_xpath('//a[text()="下一页"]').click()
            time.sleep(random.randint(1, 3))
        page_num += 1
if __name__ == '__main__':
    chrome_browser = get_chrombrowser()
    try:
        parse_agent_main(chrome_browser)
    finally:
        # Release resources even when the crawl raises: previously an
        # exception skipped db.close() and left the browser running.
        chrome_browser.quit()
        db.close()
