import numpy as np
from bs4 import BeautifulSoup
import requests

def get_publisher(html):
    """Extract the publisher name from a Douban book page's raw HTML.

    The hard-coded offsets are tied to Douban's exact markup — presumably
    '出版社:</span> NAME<br' (label + closing tag + space = 12 chars);
    TODO confirm against a live page.

    Returns the publisher string, or "" when the label or the trailing
    '<br' is not found (the original sliced with a bogus -1 index here).
    """
    start = html.find("出版社:")
    if start == -1:
        return ""
    start += 12  # skip past the label and its closing-tag markup
    end = html.find('br', start, start + 50)
    if end == -1:
        return ""
    return html[start:end - 1]  # end-1 drops the '<' of '<br'

def get_ISBN(html):
    """Extract the ISBN from a Douban book page's raw HTML.

    The hard-coded offsets are tied to Douban's exact markup — presumably
    'ISBN:</span> NUMBER<br' (label + closing tag + space = 13 chars);
    TODO confirm against a live page.

    Returns the ISBN string, or "" when the label or the trailing '<br'
    is not found (the original sliced with a bogus -1 index here).
    """
    start = html.find("ISBN:")
    if start == -1:
        return ""
    start += 13  # skip past the label and its closing-tag markup
    end = html.find('br', start, start + 50)
    if end == -1:
        return ""
    return html[start:end - 1]  # end-1 drops the '<' of '<br'

def get_price(html):
    """Extract the price (without the trailing '元') from a Douban book page.

    The hard-coded offsets are tied to Douban's exact markup — presumably
    '定价:</span> 28.00元<br' (label + closing tag + space = 11 chars);
    TODO confirm against a live page.

    Returns the numeric price string, or "" when the label or the trailing
    '<br' is not found (the original sliced with a bogus -1 index here).
    """
    start = html.find("定价:")
    if start == -1:
        return ""
    start += 11  # skip past the label and its closing-tag markup
    end = html.find('br', start, start + 50)
    if end == -1:
        return ""
    return html[start:end - 2]  # end-2 drops the currency char before '<br'

def get_rating_num(html):
    """Extract the average rating from a Douban book page's raw HTML.

    Anchors on the partial class name 'll rating_num'; the +38 offset is
    tied to the exact attribute layout of Douban's <strong> tag — presumably
    'll rating_num " property="v:average"> 9.4 </strong>'; TODO confirm.

    Returns the rating string, or "" when the anchor or the closing
    '</strong>' is not found (the original sliced with a bogus -1 index).
    """
    start = html.find("ll rating_num")
    if start == -1:
        return ""
    start += 38  # skip the rest of the attributes up to the rating digits
    end = html.find('</strong>', start, start + 50)
    if end == -1:
        return ""
    return html[start:end - 1]  # end-1 drops the space before '</strong>'

def get_intro(html):
    """Extract the book introduction from a Douban book page.

    Takes the second child of the first <div class="intro"> (or the last
    child when there are fewer than two), stringifies it, and strips the
    first 3 and last 4 characters — presumably the '<p>'/'</p>' wrapper;
    TODO confirm against a live page.

    Returns "" when no intro div exists (the original raised TypeError by
    iterating over None in that case).
    """
    soup = BeautifulSoup(html, 'lxml')
    intro_div = soup.find('div', class_='intro')
    if intro_div is None:
        return ""
    chosen = ""
    for count, child in enumerate(intro_div, start=1):
        chosen = child
        if count > 1:  # keep at most the second child
            break
    return str(chosen)[3:-4]

def get_imglink(html):
    """Return the href of the FIRST <a class="nbg"> (cover link), or 0 if none.

    Bug fix: the original guarded the assignment with `flag == 0` but never
    flipped the flag, so every iteration overwrote `res` and the LAST link
    was returned; the flag pattern clearly intended the first match.
    """
    soup = BeautifulSoup(html, 'lxml')
    for link in soup.find_all('a', class_='nbg'):
        return link['href']
    return 0  # preserve the original "no link" sentinel

'''
Input:  html — page source of a listing page that shows many books per page
Returns: a list of author names (the first '/'-separated field of each
         <p class="pl"> entry)
'''
def get_author(html):
    """Collect the author names from a Douban Top-250 listing page.

    Each book's metadata lives in a <p class="pl"> whose text is a
    '/'-separated field list starting with the author; keep that first
    field for every entry, in page order.
    """
    soup = BeautifulSoup(html, 'lxml')
    return [meta.get_text().split("/")[0]
            for meta in soup.find_all('p', class_='pl')]

def get_bname(html):
    """Collect the book titles from a Douban Top-250 listing page.

    Each book entry is a <div class="pl2"> whose first <a> carries the
    full title in its `title` attribute; return those in page order.
    """
    soup = BeautifulSoup(html, 'lxml')
    titles = []
    for entry in soup.find_all('div', class_='pl2'):
        titles.append(entry.find('a')['title'])
    return titles

def test():
    """Smoke-test the field extractors against one live Douban book page.

    Bug fix: `headers` was undefined in this scope (it only existed as a
    local inside test2), so this function always raised NameError; define
    a minimal header set locally.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36 Edg/85.0.564.63'}
    book_url = "https://book.douban.com/subject/1007305/"
    r2 = requests.get(book_url, headers=headers)
    html = r2.text
    print(get_publisher(html))
    print(get_ISBN(html))
    print(get_price(html))
    print(get_rating_num(html))
    print(get_intro(html))

def test2():
    """Crawl the five pages of the Douban Book Top-250 list and collect book names.

    Only the book-name column is scraped at the moment; the per-book detail
    crawl (ISBN, publisher, price, rating, intro, cover link) and the
    np.save persistence steps are kept below as disabled code.

    NOTE(review): nothing is returned or saved yet — enable the np.save
    lines to actually persist the results.
    """
    # Bug fix: the key was written 'Accept - Encoding' (with stray spaces),
    # which sends a malformed header name; it must be 'Accept-Encoding'.
    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
               'Accept-Encoding': 'gzip, deflate, br',
               'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
               'Connection': 'Keep-Alive',
               'Host': 'book.douban.com',
               'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36 Edg/85.0.564.63'}

    # The five listing pages (25 books per page).
    main_urls = ["https://book.douban.com/top250?start=%d" % start
                 for start in (0, 25, 50, 75, 100)]

    # Accumulators for every column we (eventually) scrape.
    a_bname = []
    a_ISBN = []
    a_author = []
    a_publisher = []
    a_price = []
    a_grade = []
    a_intro = []
    a_imglink = []

    for page_no, main_url in enumerate(main_urls, start=1):
        print("正在爬取的页数: " + str(page_no))
        rr = requests.get(main_url, headers=headers)
        html = rr.text

        # Book names come straight from the listing page.
        a_bname.extend(get_bname(html))
        # a_author.extend(get_author(html))

        # Disabled: follow each book's own page and scrape its detail fields.
        # soup = BeautifulSoup(html, 'lxml')
        # book_urls = soup.select(".pl2 a")  # each book's detail-page link
        # for tmp in book_urls:
        #     book_url = str(tmp)[9:49]  # slice the href out of the tag text
        #     r = requests.get(book_url, headers=headers)
        #     html2 = r.text
        #     a_ISBN.append(get_ISBN(html2))
        #     a_imglink.append(get_imglink(html2))
        #     a_publisher.append(get_publisher(html2))
        #     a_price.append(get_price(html2))
        #     a_grade.append(get_rating_num(html2))
        #     a_intro.append(get_intro(html2))

    a_bname = np.array(a_bname)
    # a_ISBN = np.array(a_ISBN)  # likewise for the other columns when enabled

    # Disabled: persist each column to its numbered .npy dump.
    # np.save("1.npy", a_bname)
    # np.save("2.npy", a_ISBN)
    # np.save("3.npy", a_author)
    # np.save("4.npy", a_publisher)
    # np.save("5.npy", a_price)
    # np.save("6.npy", a_grade)
    # np.save("7.npy", a_intro)
    # np.save("8.npy", a_imglink)

def test3():
    """Peek at the saved .npy scrape dumps (currently the intro column).

    All eight column files are loaded up front — so a missing dump fails
    fast — then the first introduction entry is printed both raw and via
    str().
    """
    columns = [np.load("%d.npy" % k) for k in range(1, 9)]
    intros = columns[6]  # column file 7 holds the introductions

    print(intros[0])
    print("########")
    tmp = str(intros[0])
    print(tmp)
    # Debug helper: dump the price column with row indices.
    # for idx, price in enumerate(columns[4]):
    #     print(str(idx) + " : " + price)


def filter_data():
    """Remove known-bad rows from every saved column dump, in place.

    Each of the eight numbered .npy files is loaded, the rows at the
    hard-coded `bad_rows` indices are deleted, and the file is written
    back under the same name.
    """
    bad_rows = [34, 77, 101, 102, 109]
    for k in range(1, 9):
        fname = "%d.npy" % k
        column = np.load(fname)
        np.save(fname, np.delete(column, bad_rows))
        
# Script entry point: inspect the previously saved scrape dumps.
# Uncomment filter_data() to prune the hard-coded bad rows first.
if __name__ == '__main__':

    # filter_data()
    test3()


























