from bs4 import BeautifulSoup
import re
import requests
import os
from selenium import webdriver
import time
from selenium.webdriver.common.keys import Keys

class Book:
    """A scraped book record: title plus its reservation-page URL.

    (Currently unused by the scraper, which collects plain lists, but kept
    as the intended data model.)
    """

    # Class-level defaults kept so ``Book.title`` / ``Book.url`` access on
    # the class itself stays backward compatible.
    title = ''
    url = ''

    def __init__(self, title='', url=''):
        # BUG FIX: the original had no __init__, so all "fields" were
        # shared class attributes; store per-instance values instead.
        self.title = title
        self.url = url

class Scheduler:
    """Scrapes the Shenzhen Library new-book listing.

    Depending on ``use_request``, either downloads the raw page once with
    ``requests`` or drives a headless Chrome browser through the paginated,
    JavaScript-rendered list, writing the collected rows to ``path``.
    """

    def __init__(self, path, root_url, count, use_request=False):
        """
        Args:
            path: Output file the scraped data is written to.
            root_url: Entry-point URL of the book listing.
            count: Number of result pages to walk in browser mode.
            use_request: When True, fetch ``root_url`` once with ``requests``
                instead of driving a browser. Defaults to False, matching the
                original script's global ``useRequest`` flag.
        """
        self.root_url = root_url
        self.count = count
        # BUG FIX: the original assigned the module-level global ``save_url``
        # here, silently ignoring the ``path`` parameter.
        self.path = path
        self.use_request = use_request

    def run_spider(self):
        """Run the scrape and write the results to ``self.path``."""
        # BUG FIX: the original read module globals (root_url, save_url,
        # useRequest) instead of the instance state set in __init__.
        if self.use_request:
            self._fetch_with_requests()
        else:
            self._fetch_with_browser()

    def _fetch_with_requests(self):
        """Download ``self.root_url`` once and append the raw HTML to the output file."""
        try:
            response = requests.get(self.root_url)
            print(response)
            if response.status_code == 200:
                print("website responded")
                # Force UTF-8 so the Chinese page text decodes correctly.
                response.encoding = "utf-8"
                if os.path.exists(self.path):
                    with open(self.path, 'a', encoding='utf-8') as fp:
                        print('开始写入数据')  # "starting to write data"
                        fp.write(response.text + '\n')
                    # (redundant fp.close() removed: ``with`` already closes)
                else:
                    print('路径不存在')  # "path does not exist"
        except Exception as e:
            # Best-effort script: report and carry on rather than crash.
            print("error:{0}".format(e))

    def _fetch_with_browser(self):
        """Walk ``self.count`` pages of the JS-rendered list with headless Chrome."""
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument("--disable-gpu")
        # Suppress ChromeDriver's noisy console logging on Windows.
        chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
        browser = webdriver.Chrome(options=chrome_options)
        try:
            browser.get(self.root_url)
            print(browser)
            book_list = []
            # BUG FIX: the page count was hard-coded to 20; honour the
            # ``count`` passed to the constructor (same 19 iterations for
            # the script's count=20, matching the original ``while i < 20``
            # with i starting at 1).
            for _ in range(1, self.count):
                # Scroll to the bottom so lazily-loaded entries render.
                browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
                time.sleep(2)
                # NOTE(review): find_elements_by_xpath is the Selenium 3 API,
                # removed in Selenium 4 — there use find_elements(By.XPATH, ...).
                books = browser.find_elements_by_xpath('//*[@id="booklist"]/li')
                for book in books:
                    title = book.find_element_by_xpath('.//h3[@class="title"]')
                    publisher = book.find_element_by_xpath('.//span[@class="publisher"]')
                    # The <li>'s ``name`` attribute encodes the record id;
                    # rewrite it into the reservation-form query parameters.
                    url = book.get_attribute('name')
                    url = url.replace("linkmetatable", "v_Tableid")
                    url = url.replace("linkmetaid", "v_recno")
                    url = "https://www.szlib.org.cn/MyLibrary/ReserveSubmit.jsp?doclibrary=044005&local=2Z&" + url
                    book_list.append([title.text, publisher.text, url])
                # Advance to the next result page and let it load.
                browser.find_element_by_xpath('//a[@class="next"]').click()
                time.sleep(2)
            print('开始写入数据')  # "starting to write data"
            with open(self.path, 'w', encoding='utf-8') as fp:
                for row in book_list:
                    fp.write(" ".join(row))
                    fp.write('\n')
        finally:
            # BUG FIX: quit() (not close()) ends the driver process, and the
            # finally block guarantees cleanup even when scraping fails.
            browser.quit()
                

if __name__ == "__main__":
    # Entry-point URL for the library's new-book listing (the category name
    # is UTF-8 percent-encoded Chinese).
    root_url = "https://www.szlib.org.cn/MyLibrary/newbook.jsp?catname=%E6%B7%B1%E5%9B%BE%E6%96%B0%E4%B9%A6%E9%80%89%E8%B4%AD%E7%9B%AE%E5%BD%95&library=044005&local=2Z#"
    # BUG FIX: ".\lib.txt" embeds a literal backslash (Python leaves unknown
    # escapes like \l alone), so on non-Windows systems it names a file
    # literally ".\lib.txt". Build a portable path instead.
    save_url = os.path.join(".", "lib.txt")
    # False: drive headless Chrome through the paginated list;
    # True: a single requests.get of the raw page.
    useRequest = False
    spider = Scheduler(save_url, root_url, 20)
    spider.run_spider()