from urllib import error, request

import pymysql
from bs4 import BeautifulSoup  # Beautiful Soup extracts structured data from HTML/XML documents


# Connect to the local MySQL database that holds the crawl targets
# (table `url`) and receives the scraped results (table `info`).
db = pymysql.connect(
    host="127.0.0.1",
    port=3306,
    user="root",
    password="root",  # `password` replaces the deprecated `passwd` alias
    db="test",
    charset="utf8",
)
cursor = db.cursor()

# Load every crawl target up front; the loop below reads info[1] (pid),
# info[3] (module name) and info[4] (relative URL path) from each row.
cursor.execute("select * from url")
data = cursor.fetchall()
# Browser-like headers so the target server treats us as a normal client.
# Hoisted out of the loop: the dict is loop-invariant.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
}

for info in data:
    # info[4] is the relative path stored in the `url` table — TODO confirm schema.
    url = "http://172.16.40.70:8009/" + info[4]
    page = request.Request(url, headers=headers)
    try:
        # Fetch exactly once (the original opened the URL twice) and decode the body.
        page_info = request.urlopen(page, timeout=5).read().decode('utf-8')
    except OSError as e:
        # HTTPError, URLError and socket timeouts all subclass OSError.
        # Print the caught instance (the original printed the HTTPError *class*)
        # and continue with the next target instead of aborting the whole run.
        print(e)
        continue

    # Parse the page; every <a> tag is a candidate module entry.
    soup = BeautifulSoup(page_info, 'html.parser')

    for title in soup.find_all('a'):
        # Skip anchors without text, the "refresh" link, and anchors with no href.
        if title.string and title.string != '刷新' and title.get('href'):
            # Parameterized query: page-derived text must never be
            # string-concatenated into SQL (injection + quoting bugs).
            cursor.execute(
                "select * from info where module_name=%s and module_des=%s",
                (info[3], title.string),
            )
            if not cursor.fetchone():
                sql = "insert into info (pid,module_name,module_des,program) values (%s,%s,%s,%s)"
                args = (info[1], info[3], title.string, title.get('href'))
                print(sql, args)
                try:
                    cursor.execute(sql, args)
                    db.commit()
                except pymysql.Error:
                    # Roll back only on database errors; a bare except here
                    # would swallow KeyboardInterrupt and real bugs.
                    db.rollback()
    