# -*-coding:utf-8-*-
from urllib.request import urlopen
from urllib import request
from bs4 import BeautifulSoup
import pymysql
import time
import urllib.request

class Rspider:
    """Scrapes book entries from search-result pages and stores them in MySQL."""

    def Store(self):
        """Download result pages, extract book fields, insert rows into `book`.

        For each page URL, fetches the HTML, finds every ``div.result``
        entry, pulls out name / image / score / cast info / description /
        link, and inserts one row per book. Errors while parsing a page are
        printed and that page's remaining books are skipped (best-effort).
        """
        # NOTE(review): credentials are hard-coded — move to config/env.
        connection = pymysql.connect(host='192.168.1.3', port=3307, user='root',
                                     password='456321zj', db='py', charset='utf8',
                                     cursorclass=pymysql.cursors.DictCursor)
        try:
            with connection.cursor() as cur:
                cur.execute("USE py")
                pageURL = set()  # pages already scraped, to avoid duplicates
                for i in range(1, 2):
                    pages = ""  # TODO: fill in the real page URL pattern
                    print("【这是第" + str(i) + "页】" + pages)
                    if pages not in pageURL:
                        headers = {
                            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
                            'Referer': '',
                            'Connection': 'keep-alive'}
                        req = request.Request(pages, headers=headers)
                        html = request.urlopen(req)
                        # Explicit parser avoids bs4's "no parser specified"
                        # warning and environment-dependent parse results.
                        bsObj = BeautifulSoup(html, "html.parser")
                        books = bsObj.findAll("div", {"class": "result"})
                        print(str(books))
                        try:
                            for book in books:
                                info = book.find('a')
                                bookName = info.attrs["title"]
                                bookImg = book.find('img').attrs["src"]
                                bookScore = book.find("span", {"class": "rating_nums"}).get_text().strip()
                                bookInfo = book.find("span", {"class": "subject-cast"}).get_text().strip()
                                bookDesc = book.find('p').get_text().strip()
                                # BUG FIX: lambda previously referenced the
                                # undefined name ``tag`` (NameError) — bind the
                                # argument correctly.
                                bookLink = book.find(lambda tag: len(tag.attrs) == 3).attrs["href"]
                                print(bookName)
                                # BUG FIX: placeholders must be bare %s —
                                # pymysql quotes/escapes parameters itself, so
                                # the old \"%s\" form stored values wrapped in
                                # extra quote characters.
                                cur.execute(
                                    "INSERT INTO book (book_name,book_link,book_img_link,"
                                    "book_desc,book_info,book_score) VALUES (%s,%s,%s,%s,%s,%s)",
                                    (bookName, bookLink, bookImg, bookDesc, bookInfo, bookScore))
                                connection.commit()
                        except Exception as e:
                            # Best-effort: report the parse failure and move on.
                            print(str(e))
                        # BUG FIX: record the page so the dedup set actually
                        # dedups (the .add() was commented out before).
                        pageURL.add(pages)
        finally:
            # Always release the DB connection, even on parse/network errors.
            connection.close()
if __name__ == '__main__':
    # Script entry point: run the scraper once.
    Rspider().Store()