import urllib.request
import bs4
from bs4 import BeautifulSoup
import sqlite3

class MySpider:
    """Crawl quotes.toscrape.com and store quotes and their tags in SQLite.

    Data lives in a local file ``quotes.db`` with two tables:
    ``quotes`` (one row per quote) and ``tags`` (one row per tag,
    linked to its quote through ``qID``).
    """

    def initDB(self):
        """Create a fresh quotes.db, replacing any existing tables."""
        self.con = sqlite3.connect("quotes.db")
        self.cursor = self.con.cursor()
        # "if exists" replaces the original bare try/except around DROP,
        # which silently swallowed every error, not just "no such table".
        self.cursor.execute("drop table if exists tags")
        self.cursor.execute("drop table if exists quotes")
        self.cursor.execute(
            "create table quotes (ID INTEGER PRIMARY KEY AUTOINCREMENT"
            "  ,quote varchar(1024),author varchar(256),author_link varchar(256))"
        )
        self.cursor.execute(
            "create table tags (ID INTEGER PRIMARY KEY AUTOINCREMENT"
            " ,qID int,tag varchar(256),tag_link varchar(256))"
        )

    def openDB(self):
        """Open the existing quotes.db (read path used by show())."""
        self.con = sqlite3.connect("quotes.db")
        self.cursor = self.con.cursor()

    def insertQuote(self, quote, author, author_link):
        """Insert one quote row and return its auto-generated ID."""
        sql = "insert into quotes (quote,author,author_link) values (?,?,?)"
        self.cursor.execute(sql, [quote, author, author_link])
        # lastrowid is the AUTOINCREMENT key of the row just inserted;
        # it replaces the original re-query of max(ID), which is both
        # slower and wrong if another writer inserts concurrently.
        return self.cursor.lastrowid

    def insertTag(self, qID, tag, tag_link):
        """Insert one tag row linked to quote `qID`."""
        sql = "insert into tags (qID,tag,tag_link) values (?,?,?)"
        self.cursor.execute(sql, [qID, tag, tag_link])

    def closeDB(self):
        """Commit pending writes and close the connection."""
        self.con.commit()
        self.con.close()

    def spider(self, url):
        """Scrape one listing page of quotes into the database.

        Returns the list of pager "next" elements — empty (falsy) on the
        last page, so the caller's while-loop terminates naturally.
        Returns None if any error occurs (best-effort: print and stop).
        """
        print(url)
        try:
            resp = urllib.request.urlopen(url)
            html = resp.read().decode()
            soup = BeautifulSoup(html, "lxml")
            divs = soup.select("div[class='col-md-8'] div[class='quote']")
            for div in divs:
                quote = div.select_one("span[class='text']").text
                author = div.select_one("small").text
                author_link = div.select_one("span:nth-of-type(2) a")["href"]
                ID = self.insertQuote(quote, author, author_link)
                links = div.select("div[class='tags'] a")
                for link in links:
                    self.insertTag(ID, link.text, link["href"])
            return soup.select("nav ul[class='pager'] li[class='next']")
        except Exception as err:
            # Report and fall through (implicit None) so paging stops
            # instead of crashing the whole crawl.
            print(err)

    def process(self):
        """Crawl page 1, 2, ... until spider() reports no next page."""
        self.initDB()
        n = 0
        con = True
        while con:
            n = n + 1
            url = "http://quotes.toscrape.com/page/" + str(n) + "/"
            con = self.spider(url)
        self.closeDB()

    def show(self):
        """Print every stored quote with its author, link, and tags."""
        self.openDB()
        self.cursor.execute("select ID,quote,author,author_link from quotes")
        quotes = self.cursor.fetchall()
        no = 0
        for quote in quotes:
            no = no + 1
            print("No", no)
            ID = quote[0]
            print(quote[1])
            print(quote[2])
            print(quote[3])
            # Parameterized query; the original concatenated str(ID) into
            # the SQL, which is the string-built-SQL anti-pattern.
            self.cursor.execute(
                "select qID,tag,tag_link from tags where qID=?", [ID]
            )
            tags = self.cursor.fetchall()
            for tag in tags:
                print(tag[1], " --- ", tag[2])
            print()
        print("Total ", len(quotes))
        self.closeDB()

# Entry point: interactive console menu (crawl / show / quit).
def main():
    """Loop over the menu until the user chooses to exit."""
    spider = MySpider()
    while True:
        print("1.爬取")   # 1: crawl
        print("2.显示")   # 2: show
        print("3.退出")   # 3: quit
        s = input("选择(1,2,3):")
        if s == "1":
            spider.process()
        elif s == "2":
            spider.show()
        elif s == "3":
            break
        # Any other input simply re-displays the menu (same as original).


# Guard so importing this module (e.g. for testing) does not start
# the interactive loop; behavior when run as a script is unchanged.
if __name__ == "__main__":
    main()