from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options
from parsel import Selector
from playwright.sync_api import sync_playwright
import time
import csv


def get_chrombrowser():
    """Launch a Chrome driver configured to dodge basic automation detection.

    Returns:
        Chrome: a Selenium Chrome driver that accepts insecure certificates
        and hides the ``navigator.webdriver`` flag from page scripts.
    """
    options = Options()
    options.add_argument('--ignore-certificate-errors')  # ignore TLS certificate errors
    # options.add_argument('--user-data-dir=D:\caiyi\patent_spider\chromefile')  # dedicated user profile
    # NOTE(review): --ssl-version-max normally takes a value (e.g. =tls1.2);
    # without one it is likely a no-op — confirm it is still needed.
    options.add_argument("--ssl-version-max")
    # Strip the "controlled by automated software" banner and the automation
    # extension, both of which some sites use for bot detection.
    options.add_experimental_option("excludeSwitches", ["enable-automation"])
    options.add_experimental_option('useAutomationExtension', False)
    # Set the capability on the Options object instead of the deprecated
    # ``desired_capabilities`` keyword (removed in Selenium 4.10).
    options.set_capability("acceptInsecureCerts", True)
    executable_path = r'C:\Program Files\Google\Chrome\Application\chromedriver'
    chrome_browser = Chrome(executable_path=executable_path, options=options)
    # Redefine navigator.webdriver before any page script runs, so probes
    # see ``undefined`` instead of ``true``.
    chrome_browser.execute_cdp_cmd(
        "Page.addScriptToEvaluateOnNewDocument",
        {"source": """Object.defineProperty(navigator, 'webdriver', {get: () => undefined})"""})
    return chrome_browser


def dinxianyuan(chrome_browser):
    """Scrape DXY's hospital listing for Hubei (location=420000), pages 1-10,
    appending one CSV row per hospital to 医院列表.csv.

    Args:
        chrome_browser: a Selenium Chrome driver (see ``get_chrombrowser``).
    """
    start_url = r'https://y.dxy.cn/hospital/?page=%s&location=420000'
    for page_num in range(1, 11):
        chrome_browser.get(start_url % page_num)
        page_soup = Selector(chrome_browser.page_source)
        item_list = page_soup.xpath('//div[@id="hospitallist"]/div').getall()
        # Open the output once per page rather than once per row.
        with open('医院列表.csv', 'a+', encoding='utf-8', newline='') as file:
            csv_writer = csv.writer(file)
            for item in item_list:
                soup = Selector(item)
                # Hospital name is split across child text nodes; strip
                # whitespace/newlines and glue the pieces back together.
                name_pieces = soup.xpath(
                    '//div[@class="hospital-title"]//text()').extract()
                name = ''.join(p.replace('\n', '').replace(' ', '')
                               for p in name_pieces)
                # Remaining columns (区域/性质/类别/等级/规模/时间) live in
                # //div[@class="td"] cells 2 through 7 of each row.
                fields = [
                    soup.xpath(
                        'normalize-space((//div[@class="td"])[%d]/text())' % pos
                    ).extract_first()
                    for pos in range(2, 8)
                ]
                data = [name] + fields
                csv_writer.writerow(data)
                print(data)
        # Be polite between pages; ``time`` is imported at module level.
        time.sleep(1)


def hubei_yiyuan():
    """Scrape the Hubei Health Commission medical-institution search.

    Opens the search page, waits for the operator to apply the hospital
    filter by hand (``input`` prompt), then walks every result page,
    visits each institution's detail page, and appends
    name/address/date/sponsor rows to 官网医院_列表.csv.
    """
    start_url = r'http://wjw.hubei.gov.cn/bsfw/bmcxfw/snyljgcx/'
    with sync_playwright() as p:
        browser = p.firefox.launch(headless=False)
        context = browser.new_context()
        # The government site is slow; allow up to 2 minutes per operation.
        context.set_default_timeout(120000)
        context.set_default_navigation_timeout(120000)
        page = context.new_page()       # result-list page
        page_info = context.new_page()  # detail page, reused for every row
        page_num = 1
        page.goto(start_url)
        page.wait_for_load_state()
        # Manual step: operator narrows the search to hospitals first.
        input('选取医院:')
        while True:
            print('@@开始抓取：%s页@@' % page_num)
            page_soup = Selector(page.content())
            name_list = page_soup.xpath(
                '//tr[@role="row"]/td[2]/text()').extract()
            href_list = page_soup.xpath(
                '//a[text()="卫生机构详细信息"]/@href').extract()
            # The "next" button carries the ``disabled`` class on the last page.
            next_page = page_soup.xpath(
                '//a[@id="example_next"]/@class').extract_first()
            # Open the output once per result page rather than once per row.
            with open('官网医院_列表.csv', 'a+', encoding='utf-8', newline='') as file:
                csv_writer = csv.writer(file)
                for name, href in zip(name_list, href_list):
                    # hrefs are relative ("./..."); rebase onto the site root.
                    url = (r'http://wjw.hubei.gov.cn/bsfw/bmcxfw/snyljgcx'
                           + href.split('.', 1)[1])
                    page_info.goto(url)
                    page_info.wait_for_load_state()
                    info_soup = Selector(page_info.content())
                    address = info_soup.xpath(
                        '//td[@id="s010301"]/text()').extract_first()
                    riqi = info_soup.xpath(
                        '//td[@id="s0104"]/text()').extract_first()
                    zhuban = info_soup.xpath(
                        '//td[@id="tjOrgan"]/text()').extract_first()
                    csv_writer.writerow([name, address, riqi, zhuban])
            if next_page == 'paginate_button next disabled':
                break
            page.click('xpath=//a[@id="example_next"]')
            page_num += 1
            # ``time`` is imported at module level.
            time.sleep(2)


def yiyun_baike(chrome_browser=None):
    """Scrape yixue.com hospital-list pages for each Hubei city and append
    rows (name/等级/经营方式/地址/city) to 医学百科_医院列表.csv.

    Args:
        chrome_browser: a Selenium Chrome driver. Defaults to the
            module-level ``chrome_browser`` created in the ``__main__``
            block (legacy behaviour); passing it explicitly is preferred
            and matches ``dinxianyuan``'s signature.
    """
    if chrome_browser is None:
        # Legacy fallback: the original code read this name as an implicit
        # global, which only exists when run as a script.
        chrome_browser = globals()['chrome_browser']
    city_list = ['武汉市', '襄樊市', '宜昌市', '荆州市', '黄石市', '黄冈市', '孝感市', '十堰市',
                 '咸宁市', '恩施土家族苗族自治州', '荆门市', '随州市', '鄂州市', '天门市', '仙桃市', '潜江市', '神农架林区']
    url = r'https://yixue.com/%s医院列表'
    for city in city_list:
        chrome_browser.get(url % city)
        soup = Selector(chrome_browser.page_source)
        # The 4th <ul> inside the article body holds the hospital entries.
        yiyuan_list = soup.xpath(
            '(//div[@class="mw-parser-output"]/ul)[4]/li').getall()
        # Open the output once per city rather than once per row.
        with open('医学百科_医院列表.csv', 'a+', encoding='utf-8', newline='') as file:
            csv_writer = csv.writer(file)
            for item in yiyuan_list:
                yiyuan_soup = Selector(item)
                name = yiyuan_soup.xpath(
                    'normalize-space(//b/a/text())').extract_first()
                denji = yiyuan_soup.xpath(
                    'normalize-space(//ul/li/b[text()="医院等级"]/../text())').extract_first()
                if denji:
                    # Drop the leading full-width colon after the label.
                    denji = denji.replace('：', '')
                jinyin = yiyuan_soup.xpath(
                    'normalize-space(//ul/li/b[text()="经营方式"]/../text())').extract_first()
                if jinyin:
                    jinyin = jinyin.replace('：', '')
                dizhi = yiyuan_soup.xpath(
                    'normalize-space(//ul/li/b[text()="医院地址"]/../text())').extract_first()
                data = [name, denji, jinyin, dizhi, city]
                csv_writer.writerow(data)
                print(data)


if __name__ == '__main__':
    # Scrape the zgylbx.com insurance-hospital listing filtered to 湖北省
    # (k1 is URL-encoded in the query string); 57 result pages.
    chrome_browser = get_chrombrowser()
    start_url = r'https://zgylbx.com/index.php?m=content&c=index&a=lists&catid=106&steps=&search=1&pc_hash=&k1=%E6%B9%96%E5%8C%97%E7%9C%81&k2=0&k3=0&title=&page={page}'
    try:
        for page in range(1, 58):
            print('@@抓取第%s页@@' % page)
            url = start_url.format(page=page)
            chrome_browser.get(url)
            soup = Selector(chrome_browser.page_source)
            item_list = soup.xpath('//tr[contains(@class,"tr-dt")]').getall()
            # Open the output once per page rather than once per row.
            with open('医疗保险_医院列表.csv', 'a+', encoding='utf-8', newline='') as file:
                csv_writer = csv.writer(file)
                for item in item_list:
                    item_soup = Selector(item)
                    name = item_soup.xpath('//td[1]/text()').extract_first()
                    denji = item_soup.xpath('//td[3]/text()').extract_first()
                    data = [name, denji]
                    csv_writer.writerow(data)
                    print(data)
            time.sleep(2)
    finally:
        # Always release the browser, even if a page fails mid-run.
        chrome_browser.quit()
