from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options
import time
import random
from parsel import Selector
from connect_db import ConnectMysqldb
import pymysql
# Scrape detailed information for patent agency organizations (CNIPA).

# config = {'mysql_host': "localhost",
#           'mysql_port': 3306,
#           'mysql_user': "root",
#           'mysql_passwd': "123456",
#           'mysql_db': "big_data"
#           }

# Module-level MySQL connection shared by all functions below.
# NOTE(review): credentials are hard-coded — consider loading them from the
# commented-out config dict above, an env var, or a config file.
db = pymysql.connect(host="localhost",
                     user='root',
                     port=3306,
                     password='123456',
                     database='big_data')
# Shared cursor used by parse_agent_info() for all INSERT statements.
cur = db.cursor()


def get_chrombrowser():
    """Build and return a Chrome driver configured to evade basic bot checks."""
    opts = Options()
    # Ignore TLS certificate errors on the target site.
    opts.add_argument('--ignore-certificate-errors')
    opts.add_argument("--ssl-version-max")
    # Hide the "controlled by automated software" banner and the
    # automation extension that sites use to detect Selenium.
    opts.add_experimental_option("excludeSwitches", ["enable-automation"])
    opts.add_experimental_option('useAutomationExtension', False)
    caps = {"acceptInsecureCerts": True}
    driver_path = r'C:\Program Files\Google\Chrome\Application\chromedriver'
    browser = Chrome(
        executable_path=driver_path, options=opts, desired_capabilities=caps)
    # Mask navigator.webdriver before any page script runs, so simple
    # fingerprinting sees `undefined` instead of `true`.
    browser.execute_cdp_cmd(
        "Page.addScriptToEvaluateOnNewDocument",
        {"source": "Object.defineProperty(navigator, 'webdriver', {get: () => undefined})"})
    return browser


def check_code(chrome_browser):
    """Handle anti-scraping interruptions on the current page.

    If the captcha message box is visible, block until the operator solves it
    manually and presses Enter; then keep refreshing while the site shows a
    "system busy" error page.
    """
    sel = Selector(chrome_browser.page_source)
    box_style = sel.xpath('//div[@id="messagebox"]/@style').extract_first()
    if box_style == 'display: block;':
        input('请输入验证码：')
    while True:
        page = chrome_browser.page_source
        if '系统繁忙，请稍后再试！' not in page and '抱歉，系统在处理请求时发生了错误，给您带来的不便敬请谅解!' not in page:
            break
        print('系统繁忙，稍后自动刷新')
        time.sleep(10)
        chrome_browser.refresh()


def parse_agent_info(chrome_browser):
    """Parse one agency's detail page and persist it, then crawl the paginated
    staff list inside the agency's iframe.

    Writes one row to `agency_rank` and one row per agent to
    `agent_person_rank` via the module-level `cur`/`db` connection.
    Assumes the driver's current window is already on the detail page.
    """
    check_code(chrome_browser)
    agent_soup = Selector(chrome_browser.page_source)
    # --- agency basic info ------------------------------------------------
    InstitutionName = agent_soup.xpath(
        '//div[@class="agency-head"]/div/h5[@class="name"]/text()').extract_first()
    OrganizationCode = agent_soup.xpath(
        '//div[@class="agency-head"]/div/div[@class="info"]//dt[text()="机构代码："]/../dd/text()').extract_first()
    InstitutionalStatus = agent_soup.xpath(
        'normalize-space(//dt[text()="代理机构状态"]/../dd/text())').extract_first()
    InstitutionalNature = agent_soup.xpath(
        'normalize-space(//dt[text()="机构类型："]/../dd/text())').extract_first()
    AgeLimit = agent_soup.xpath(
        'normalize-space(//dt[text()="代理机构成立年限"]/../dd/text())').extract_first()
    NumberAgents = agent_soup.xpath(
        '//th[text()="专利代理师人数"]/following-sibling::td[1]/text()').extract_first()
    CreditRating = agent_soup.xpath(
        'normalize-space(//dt[text()="信用等级"]/../dd/text())').extract_first()
    mailbox = agent_soup.xpath(
        'normalize-space(//dt[text()="电子邮箱："]/../dd/text())').extract_first()
    phone = agent_soup.xpath(
        'normalize-space(//dt[text()="联系电话："]/../dd/text())').extract_first()
    PostalCode = agent_soup.xpath(
        'normalize-space(//dt[text()="邮政编码："]/../dd/text())').extract_first()
    Adress = agent_soup.xpath(
        'normalize-space(//dt[text()="通讯地址："]/../dd/text())').extract_first()
    partner = agent_soup.xpath(
        'normalize-space(/html/body/div/div[1]/div/div/div/div[1]/dl[6]/dd/text())').extract_first()
    if InstitutionName:
        # Parameterized query: the scraped values are untrusted input — an
        # agency name containing a quote would break (or inject into) an
        # f-string-built statement. Let the driver do the escaping.
        agent_sql = """INSERT INTO agency_rank (InstitutionName, OrganizationCode, InstitutionalStatus, InstitutionalNature, AgeLimit, NumberAgents, CreditRating, mailbox, phone, PostalCode, Adress, partner) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
        cur.execute(agent_sql, (InstitutionName, OrganizationCode,
                                InstitutionalStatus, InstitutionalNature,
                                AgeLimit, NumberAgents, CreditRating, mailbox,
                                phone, PostalCode, Adress, partner))
        db.commit()
        print('===%s：机构基本信息入库完成===' % InstitutionName)
    # --- staff list (lives in an iframe, itself paginated) ----------------
    per_url_str = agent_soup.xpath(
        '//iframe[@id="agent-frame"]/@src').extract_first()
    if per_url_str:
        per_url = 'http://dlgl.cnipa.gov.cn/' + per_url_str
        time.sleep(random.randint(2, 3))
        chrome_browser.get(per_url)
        check_code(chrome_browser)
        time.sleep(1)
        per_page = 1
        per_page_s = True
        while per_page_s:
            print('@@开始解析%s_第%s页人员信息数据@@' % (InstitutionName, per_page))
            person_soup_main = Selector(chrome_browser.page_source)
            next_page = person_soup_main.xpath(
                '//a[text()="下一页"]/@onclick').extract_first()
            person_soup_list = person_soup_main.xpath(
                '//ul[@class="person-list"]/li').getall()
            for item in person_soup_list:
                person_soup = Selector(item)
                names = person_soup.xpath(
                    '//h5[@class="name"]/a[@class="name"]/text()').extract_first()
                certificate = person_soup.xpath(
                    '//dt[text()="资格证号："]/following-sibling::dd[1]/text()').extract_first()
                FilingNumber = person_soup.xpath(
                    '//dt[text()="执业备案号："]/following-sibling::dd[1]/text()').extract_first()
                speciality = person_soup.xpath(
                    'normalize-space(//p[@class="job"]/text())').extract_first()
                practice = person_soup.xpath(
                    '//dt[text()="执业年限："]/following-sibling::dd[1]/text()').extract_first()
                if names is not None:
                    # Same injection concern as above: use placeholders.
                    agent_person_sql = """INSERT INTO agent_person_rank (names, certificate, FilingNumber, speciality, practice, OrganizationCode) VALUES (%s, %s, %s, %s, %s, %s)"""
                    cur.execute(agent_person_sql,
                                (names, certificate, FilingNumber, speciality,
                                 practice, OrganizationCode))
                    db.commit()
                print('===%s人员：%s信息入库完成===' % (InstitutionName, names))
            if next_page:
                per_page += 1
                chrome_browser.find_element_by_xpath('//a[text()="下一页"]').click()
                time.sleep(random.randint(2, 3))
            else:
                per_page_s = False


def parse_agent_main(chrome_browser):
    """Drive the agency search results: filter by 湖北省, page through the
    result list, and open each agency in a new tab for parse_agent_info().
    """
    start_url = r'http://dlgl.cnipa.gov.cn/txnqueryAgencyOrg.do'
    chrome_browser.get(start_url)
    time.sleep(random.randint(2, 3))
    # Select the 湖北省 filter, then run the query.
    chrome_browser.find_element_by_xpath('//a[text()="湖北省"]').click()
    chrome_browser.find_element_by_xpath('//button[text()="查询"]').click()
    time.sleep(5)
    page_num = 1
    try:
        # The "last page" link looks like onclick="pageSkip('42')" — extract
        # the page count from it.
        max_page_str = chrome_browser.find_element_by_xpath(
            '//a[text()="尾页"]').get_attribute('onclick')
        max_page = int(max_page_str.replace("pageSkip('", '').replace("')", ''))
    except Exception:
        # Was a bare `except:` (also caught KeyboardInterrupt/SystemExit).
        # A missing "尾页" link means there is only one page of results.
        max_page = 1
    print(f'max_page:{max_page}')
    while page_num <= max_page:
        print('--------开始抓取第%s页信息--------' % page_num)
        check_code(chrome_browser)
        # All agency-name links on the current results page; shuffle to make
        # the access pattern look less robotic.
        agent_el_list = chrome_browser.find_elements_by_xpath(
            '//table[@class="agency-tlist"]/tbody/tr//a[@class="name"]')
        random.shuffle(agent_el_list)
        for agent_el in agent_el_list:
            agent_el.click()  # opens the detail page in a second window
            check_code(chrome_browser)
            time.sleep(random.randint(1, 3))
            page_handles = chrome_browser.window_handles
            main_handle = page_handles[0]
            chrome_browser.switch_to.window(page_handles[1])
            parse_agent_info(chrome_browser)
            chrome_browser.close()
            chrome_browser.switch_to.window(main_handle)
            time.sleep(random.randint(3, 5))
        print('@@第%s页信息抓取完成@@' % page_num)
        if page_num < max_page:
            # Only advance when another page exists — clicking "下一页" on the
            # last page would raise NoSuchElementException and abort the run.
            chrome_browser.find_element_by_xpath('//a[text()="下一页"]').click()
            time.sleep(random.randint(5, 10))
        page_num += 1


if __name__ == '__main__':
    chrome_browser = get_chrombrowser()
    try:
        parse_agent_main(chrome_browser)
    finally:
        # Release resources even if scraping raises midway: previously the
        # DB connection was only closed on a clean run, and the browser
        # process was never quit at all.
        chrome_browser.quit()
        db.close()
