import pymysql
import requests
import re
from selenium import webdriver
import hashlib
import datetime


class GLY(object):
    """Crawler for CFDA announcement listings (samr.cfda.gov.cn).

    Collects every listing-page URL for the national (``link1``) and local
    (``link2``) announcement channels, extracts the article links from each
    listing page, and inserts one row per article into the MySQL table
    ``gly`` of the ``app_mark`` database.
    """

    def __init__(self):
        # MySQL connection settings for the `app_mark` database.
        self.host = '127.0.0.1'
        self.db = 'app_mark'
        self.user = 'root'
        self.passwd = '123456'
        self.charset = 'utf8mb4'
        # First page of the national-bureau announcements channel; the total
        # page count is scraped from this page.
        self.link1 = 'http://samr.cfda.gov.cn/WS01/CL1698/'
        # First page of the local announcements channel.
        self.link2 = 'http://samr.cfda.gov.cn/WS01/CL1699/'
        # All listing-page URLs accumulated by get_all_links().
        self.urls = []
        # Shared headless-Chrome driver, created lazily on first use.
        self.driver = None

    def _ensure_driver(self):
        """Create the headless Chrome driver once and reuse it.

        Fixes a leak in the original code, which built a fresh driver on
        every get_pages() call and never quit any of them.
        """
        if self.driver is None:
            opt = webdriver.ChromeOptions()
            opt.add_argument('--headless')
            opt.add_argument('--no-sandbox')
            opt.add_argument('--disable-gpu')
            opt.add_argument('--disable-dev-shm-usage')
            # `options=` replaces the `chrome_options=` kwarg, which was
            # deprecated in Selenium 3 and removed in Selenium 4.
            self.driver = webdriver.Chrome(options=opt)
        return self.driver

    def get_pages(self, link):
        """Return the total number of listing pages for the channel *link*.

        The page count is rendered client-side, so plain HTTP fetching does
        not see it; Selenium renders the page first.

        Raises IndexError if the "共N页" marker is absent from the page.
        """
        driver = self._ensure_driver()
        driver.get(link)
        text = driver.page_source
        sum_pages = re.findall("共(.*?)页", text)
        return int(sum_pages[0])

    def get_all_links(self, link):
        """Append every listing-page URL of channel *link* to self.urls."""
        pages = self.get_pages(link)
        # The first page has no index suffix, so it is added verbatim.
        self.urls.append(link)
        # Pages 2..N follow the pattern <link>index_<i>.html, i starting at 1.
        for index in range(1, pages):
            self.urls.append('{}index_{}.html'.format(link, index))

    def append_urls(self):
        """Queue the listing pages of both channels.

        Bug fix: the original referenced the module-level global ``gly``
        instead of ``self``, so the method broke for any instance not bound
        to that exact name.
        """
        self.get_all_links(self.link1)
        self.get_all_links(self.link2)

    def parse_link(self):
        """Visit each queued listing page and persist every article link."""
        driver = self._ensure_driver()
        for url in self.urls:
            driver.get(url)
            text = driver.page_source
            # Article links are relative ("../CLxxxx/nnnn.html"); capture the
            # CL-path and rebuild the absolute URL against the WS01 root.
            # Raw string avoids invalid \. / \d escape warnings.
            article_urls = re.findall(r'<a href="\.\./(CL\d+/\d+)\.html" target', text)
            for article_url in article_urls:
                article_url = 'http://samr.cfda.gov.cn/WS01/{}.html'.format(article_url)
                print(article_url)
                # md5 of the URL serves as a stable dedup key in the table.
                hkey = hashlib.md5(article_url.encode(encoding='utf-8')).hexdigest()
                lasttime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                list_data = [article_url, hkey, '0', '国家食品药品监督管理总局', lasttime]
                self.save_data(list_data)

    def save_data(self, list_data):
        """Insert one article row; roll back and log on failure.

        Opens a fresh connection per row (simple but slow). Connection and
        cursor are now closed even when execute() raises, and the success
        message is only printed after the commit.
        """
        con = pymysql.connect(host=self.host, db=self.db, user=self.user,
                              passwd=self.passwd, charset=self.charset)
        try:
            with con.cursor() as cur:
                sql = ('insert into gly (link, hkey, tag, sitename, lasttime) '
                       'values (%s, %s, %s, %s, %s)')
                try:
                    cur.execute(sql, list_data)
                except Exception as e:
                    con.rollback()
                    print('error~', e)
                else:
                    con.commit()
                    print('insert success')
        finally:
            con.close()

    def close(self):
        """Quit the shared browser driver, if one was ever created."""
        if self.driver is not None:
            self.driver.quit()
            self.driver = None


if __name__ == '__main__':
    # Entry point: collect every listing-page URL for both channels, then
    # crawl each page and persist the article links to MySQL.
    # NOTE(review): the instance must be named exactly `gly` — append_urls()
    # in the class above reads the module-level global `gly` instead of
    # `self`, so renaming this variable would break the crawl.
    gly = GLY()
    gly.append_urls()
    gly.parse_link()
