import requests
from bs4 import BeautifulSoup
import re
import pandas as pd


def getHTMLText( url ):
    """
    Fetch a web page and return its decoded HTML text.

    :param url: page URL to fetch
    :return: decoded HTML text of the response
    :raises requests.RequestException: on network failure, timeout,
        or non-2xx HTTP status
    """
    headers = {
        # Desktop Chrome UA string; the default requests UA tends to be blocked.
        'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
    }

    # timeout keeps a dead server from hanging the scraper forever.
    r = requests.get(url, headers=headers, timeout=10)
    r.raise_for_status()
    # Use the charset requests sniffed from the body instead of hard-decoding
    # r.content as UTF-8: the original set apparent_encoding and then ignored
    # it, which raises UnicodeDecodeError on non-UTF-8 pages.
    r.encoding = r.apparent_encoding
    return r.text


def get_soup(url):
    """
    Download *url* and return it parsed as a BeautifulSoup tree (lxml parser).

    :param url: page URL to fetch
    :return: BeautifulSoup object for the page
    """
    html = getHTMLText(url)
    return BeautifulSoup(html, 'lxml')

def get_books_link():
    """
    Collect the detail-page links for the Douban Reading Top 50 books.

    :return: list of book detail-page URLs
    """
    # Two listing pages of 25 entries each; add more start= offsets
    # to this list to collect links for more books.
    page_urls = ["https://book.douban.com/top250?start=0",
                 "https://book.douban.com/top250?start=25"]

    return [row.td.a.attrs['href']
            for page_url in page_urls
            for row in get_soup(page_url).find_all('tr', class_='item')]

def get_book_name(soup):
    """
    Extract the book title from a detail page.

    :param soup: BeautifulSoup tree of a book detail page
    :return: title string, or None when the page carries no title element
    """
    # A detail page has a single <span property="v:itemreviewed">, so find()
    # replaces the original find_all loop (which also left book_name unbound
    # and raised UnboundLocalError when no span matched).
    tag = soup.find('span', property="v:itemreviewed")
    return tag.string if tag is not None else None



def get_publication_status(soup):
    """
    Extract publication info from a detail page.

    :param soup: BeautifulSoup tree of a book detail page
    :return: "author/publisher/year/price" string (missing fields become
             empty), or None when the page has no info block
    """
    # Matches '<span class="pl">label:</span>value<br/>' pairs, i.e. values
    # that sit as bare text (no tag of their own) inside the info block.
    label_value = re.compile(r'<span class="pl">(.*?):</span>(.*?)<br/>')
    wanted = ("出版社", "出版年", "定价")

    info_div = soup.find('div', id='info')
    if info_div is None:
        # Original left publication_status unbound here and crashed.
        return None

    fields = {label: value.strip()
              for label, value in label_value.findall(str(info_div))
              if label in wanted}
    # The first <a> inside the info block is the author link.
    author = info_div.a.string.replace(' ', '').replace('\n', '')
    # .get(..., '') keeps pages that omit a field (e.g. no price) from
    # raising KeyError as the original did.
    return '/'.join([author] + [fields.get(label, '') for label in wanted])
        

def get_score(soup):
    """
    Extract the average rating from a detail page.

    :param soup: BeautifulSoup tree of a book detail page
    :return: rating string with surrounding whitespace stripped out
    """
    for section in soup.find_all('div', id="interest_sectl"):
        # <strong> holds the numeric rating, padded with spaces/newlines.
        raw = section.strong.string
        score = raw.replace(' ', '').replace('\n', '')
    return score

def get_votes(soup):
    """
    Extract the number of raters from a detail page.

    :param soup: BeautifulSoup tree of a book detail page
    :return: vote-count string with whitespace stripped out
    """
    for section in soup.find_all('div', id="interest_sectl"):
        votes_tag = section.find_all('span', property="v:votes")[0]
        votes = votes_tag.string.replace(' ', '').replace('\n', '')
    return votes

def get_star_rate(soup, star_num):
    """
    Extract one star-rating percentage from a detail page.

    :param soup: BeautifulSoup tree of a book detail page
    :param star_num: 1-based row index into the rating breakdown as listed
        on the page (row 1 is the 5-star line, row 5 the 1-star line —
        matching the caller's "5星占比".."1星占比" column order)
    :return: percentage string, e.g. "55.8%"
    """
    for section in soup.find_all('div', id="interest_sectl"):
        percentages = [span.string.replace(' ', '').replace('\n', '')
                       for span in section.find_all('span', class_="rating_per")]

    return percentages[int(star_num - 1)]


if __name__ == '__main__':
    columns = ["名称", "出版情况", "评价分数", "评价人数", "5星占比",
            "4星占比", "3星占比", "2星占比", "1星占比"]
    book_info = pd.DataFrame(columns=columns)

    book_links = get_books_link()
    # Only scrape the first 10 books; widen the slice for the full list.
    for link in book_links[:10]:
        soup = get_soup(link)
        row = [
            get_book_name(soup),
            get_publication_status(soup),
            get_score(soup),
            get_votes(soup),
        ]
        # Star percentages, page order top-down (5-star .. 1-star),
        # matching the column order above.
        row.extend(get_star_rate(soup, star) for star in range(1, 6))
        book_info.loc[len(book_info)] = row

    # .xlsx instead of .xls: pandas 2.0 removed the xlwt engine, so
    # to_excel() can no longer write legacy .xls files.
    book_info.to_excel('./豆瓣读书Top50.xlsx', sheet_name='豆瓣读书Top50', index=False)
    print(book_info)
    
        
    

