import urllib.request
import re
import pymysql
from lxml import etree

'''
Scrape summary data (title, URL, hit count, comment count) from
blog.hexun.com category listings and store it in MySQL.
'''

# Browser-like request headers. The Host header is deliberately NOT pinned:
# this opener is reused for several hosts (blog.hexun.com and
# click.tool.hexun.com below), and a hard-coded Host would be wrong for all
# but one of them — urllib fills in the correct Host per request.
headers = [("User-Agent", "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"),
           ("Referer", "http://www.hexun.com/")]
opener = urllib.request.build_opener()
opener.addheaders = headers
# Fetch the blog portal front page; drop undecodable bytes rather than fail.
data = opener.open("http://blog.hexun.com").read().decode("utf-8", "ignore")
html = etree.HTML(data)
# Category links, e.g. http://blog.hexun.com/class69.htm
urls = html.xpath("//div[@class='allnav']/dl/dd/a/@href")

# Extract the numeric category id from each category link, e.g. the 69 in
# http://blog.hexun.com/class69.htm. Links containing no digits are skipped
# instead of crashing on a failed match (re.search would return None).
_class_id = re.compile("[0-9]+")
array = [m.group() for m in (_class_id.search(i) for i in urls) if m is not None]
# Pattern capturing the query-string part of the hits/comments counter link.
pat1 = 'http://click.tool.hexun.com/click.aspx(.*?)">'
# Pattern extracting the numbers from the counter service's response.
pat2 = '[0-9]+'
# Fallback [hits, comments] used when a post exposes no counter link.
# Strings, not ints — downstream code concatenates these into SQL text.
h_and_c_list = ["0", "0"]

# MySQL connection used by the INSERTs in the scrape loop below.
# Credentials are intentionally blank — fill in before running.
conn = pymysql.connect(
    host="",
    port=3306,
    user="",
    passwd="",
    db="",
    charset="utf8",
)

# Walk every category id, page through its listing, and persist each post.
for num in array:
    for p in range(1, 6):  # first 5 pages of each category listing
        url = "http://blog.hexun.com/group/class" + num + "_latest_p_" + str(p) + ".html"
        data = opener.open(url).read().decode("utf-8", "ignore")
        list_page = etree.HTML(data)
        # Links to the individual blog posts on this listing page.
        list_data = list_page.xpath("//div[@id='dinglist']/dl/dd[@class='txt']/h2/a[@target='_blank']/@href")
        for c_url in list_data:  # visit every blog post found
            data = opener.open(c_url).read().decode("gbk", "ignore")
            detail_page = etree.HTML(data)
            # Reset per post so a post without a counter link does not
            # silently inherit the previous post's hit/comment numbers.
            h_and_c_list = ["0", "0"]
            # Query-string part of the hits/comments counter link.
            h_m_url_list = re.compile(pat1).findall(data)
            if len(h_m_url_list) == 1:
                # Rebuild the full counter URL and fetch [hits, comments].
                h_m_url = "http://click.tool.hexun.com/click.aspx" + h_m_url_list[0]
                data2 = opener.open(h_m_url).read().decode("utf-8", "ignore")
                counts = re.compile(pat2).findall(data2)
                if len(counts) >= 2:  # guard against a malformed response
                    h_and_c_list = counts
            blog_title = detail_page.xpath("//span[@class='ArticleTitleText']/a/text()")
            if not blog_title:  # no recognizable title — skip this post
                continue
            # Parameterized query: titles scraped from pages may contain
            # quotes; string-built SQL would break (and be injectable).
            sql_str = "INSERT INTO hexun_blog_data (name,url,hits,comment) VALUES (%s,%s,%s,%s)"
            try:
                with conn.cursor() as cursor:
                    cursor.execute(sql_str, (blog_title[0], c_url,
                                             int(h_and_c_list[0]), int(h_and_c_list[1])))
                # pymysql does not autocommit by default — without this the
                # inserted rows are never persisted.
                conn.commit()
            except Exception as err:
                print(err)
conn.close()
