# -*- coding: utf-8 -*-
#---------------------------------------
#   程序：帖子爬虫
#   版本：0.1.1
#   作者：woaitianwen
#   日期：2017-12-31
#   语言：Python 3.6
#   操作：输入网址后就获取帖子作者、内容和发布时间
#   功能：将楼主发布的内容就保存到mysql数据库。
#---------------------------------------

from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import mysql.connector


# Connect to the MySQL database `tieba` on localhost.
# NOTE(review): credentials are hard-coded — move user/password to a
# config file or environment variables before real deployment.
conn = mysql.connector.connect(user='root', password='root', database='tieba')
# Cursor used by Tieba.get_data() to execute INSERT statements.
cur = conn.cursor()


#定义一个帖子的类
class Tieba:
    """Scraper for a deyi.com forum thread.

    Walks every page of the thread, extracts (author, content, post time)
    for each post and inserts one row per post into the module-level MySQL
    connection (`cur` / `conn`).
    """

    def __init__(self, url="http://www.deyi.com/thread-11311721-1-1.html"):
        """Remember the thread URL and derive the numeric thread id.

        url -- first-page URL of the thread. Defaults to the original
               hard-coded thread, so existing no-argument callers keep
               working unchanged.
        """
        self.url = url
        self.baseurl = "http://www.deyi.com"
        # The thread id is needed to rebuild per-page URLs; fall back to
        # the historic id if the URL does not match the expected pattern.
        match = re.search(r'/thread-(\d+)-', url)
        self.thread_id = match.group(1) if match else '11311721'
        self.datas = []
        print("开始爬虫-------------------------------")

    def get_url(self):
        """Return the list of page URLs for the whole thread.

        Reads the pagination bar (`div.pg`) on the first page to find the
        highest page number. A thread without a pagination bar (single
        page) no longer crashes — it simply yields one URL.
        """
        response = urlopen(self.url)
        bsObj = BeautifulSoup(response, "lxml")

        pager = bsObj.find("div", {"class": "pg"})
        print("——————————————————")

        if pager is None:
            # Single-page thread: no pagination bar at all.
            max_page = 1
        else:
            # Page numbers appear as the second numeric group in each
            # pager link; dots in the host/suffix are escaped (the
            # original pattern let `.` match any character).
            page_pattern = re.compile(
                r'http://www\.deyi\.com/thread-\d+-(\d+)-1\.html')
            numbers = [int(n) for n in
                       page_pattern.findall(str(pager.find_all("a")))]
            max_page = max(numbers) if numbers else 1

        return ['{}/thread-{}-{}-1.html'.format(self.baseurl,
                                                self.thread_id, page)
                for page in range(1, max_page + 1)]

    def get_data(self):
        """Fetch every page, extract each post and store it in MySQL.

        Posts missing a body or timestamp (e.g. deleted posts) are
        skipped, preserving the original best-effort behaviour.
        """
        # Noise injected by the forum into post bodies: timestamps,
        # "上传"/"下载附件" attachment notices, sizes, parenthesised
        # annotations and ad blocks. Compiled once (loop-invariant).
        noise = re.compile(
            r'\r|\n|\d{4}[-/]\d{2}[-/]\d{1,2}|\d{1,2}[:/]\d{1,2}|上传|下载附件|\d+\.*\d+|\(.*?\)|广 告.pcb{.*}|\n|',
            re.S)

        # Iterate the actual page URLs instead of re-deriving the count
        # from len(get_url()) and rebuilding identical URLs by hand.
        for url in self.get_url():
            html = urlopen(url)
            soup = BeautifulSoup(html, 'lxml')
            # Each post lives in a <table id="pid...">.
            posts = soup.find_all("table", {"id": re.compile("pid.*")})

            for post in posts:
                # Author name sits in <div class="authi">; guard against
                # malformed posts (the original crashed here because the
                # lookup was outside the try block).
                author_div = post.find("div", {"class": "authi"})
                if author_div is None:
                    continue
                author = author_div.text.strip()

                try:
                    body = post.find("td", {"class": "t_f"}).text.strip()
                    content = noise.sub('', body).strip()

                    # Post time is in <em id="authorposton...">, prefixed
                    # with "发表于" ("posted at"), which we drop.
                    date_em = post.find(
                        "em", {"id": re.compile('authorposton.*')})
                    date = re.sub(r'发表于', '', date_em.text.strip()).strip()

                    # Parameterized INSERT (safe against SQL injection);
                    # commit per row as before.
                    cur.execute(
                        "INSERT INTO tiebas(author, content, dates) VALUES (%s, %s, %s)",
                        (author, content, date))
                    conn.commit()
                except AttributeError:
                    # Post without a body or timestamp: skip it.
                    pass



# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    baseTieba = Tieba()
    baseTieba.get_data()
