# 导入相应的库文件
import requests
from lxml import etree
import os
import re

def write(info):
    """Append one book record to ./content/content.txt.

    Args:
        info: sequence of up to four values in the fixed order
              (title, author, category, introduction). Each value is
              written on its own labelled line, and the whole record
              is framed by rows of 30 '*' characters.
    """
    labels = ['标题', '作者', '分类', '介绍']
    file_path = './content'
    # makedirs(exist_ok=True) avoids the check-then-create race of
    # os.path.exists + os.mkdir
    os.makedirs(file_path, exist_ok=True)
    # 'with' guarantees the file is closed even if a write raises
    with open(file_path + '/content.txt', 'a', encoding='utf-8') as fo:
        fo.write('*' * 30 + '\n')
        # zip stops at the shorter sequence, so an over-long info list
        # can no longer raise IndexError on labels
        for label, value in zip(labels, info):
            fo.write(str(label) + ':' + str(value) + '\n')
        fo.write('*' * 30 + '\n')

def listTostr(l):
    """Join the items of *l* into one string, each preceded by a space.

    Preserves the original output format (['a', 'b'] -> ' a b', and
    [] -> ''), but builds the result in a single pass with join
    instead of quadratic '+' concatenation.
    """
    return ''.join(' ' + str(item) for item in l)

def get_img(url, num):
    """Download one image and save it as ./img_<num>.png.

    Args:
        url: direct URL of the image to fetch.
        num: sequence number used to build the output filename.
    """
    r = requests.get(url, stream=True)
    print(r.status_code)  # 返回状态码 (report HTTP status)
    if r.status_code == 200:
        # context manager closes the file even if the write fails
        with open('./img_{}.png'.format(num), 'wb') as f:
            f.write(r.content)  # 将内容写入图片
        print("done")

# 定义获取爬虫信息的函数
# 定义获取爬虫信息的函数
def get_info(url, num):
    """Scrape one book detail page: record its metadata and download covers.

    Args:
        url: detail-page URL on new.shuge.org.
        num: running image counter used to name downloaded files.

    Returns:
        The updated image counter after downloading this page's images.

    Raises:
        IndexError: if the title/author XPath matches nothing
        (page layout changed) — the caller treats that as a failed page.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:79.0) Gecko/20100101 Firefox/79.0'
    }
    html = requests.get(url, headers=headers)
    data = html.content.decode('utf-8')
    selector = etree.HTML(data)
    # NOTE: class values below include trailing spaces on purpose —
    # they must match the site's markup exactly
    title = selector.xpath('//h2[@class="av-special-heading-tag "]/text()')[0]
    author = selector.xpath('//*[@id="av_section_1"]/div/div/div/div/div[2]/div/ul/li[2]/article/div/div/p/text()')[0]
    style = selector.xpath('//span[@class="blog-categories minor-meta"]/a/text()')
    intro = selector.xpath('//div[@class="avia_textblock  "]/p/text()')
    write([title, author, listTostr(style), listTostr(intro)])

    # 爬取小说封面图片 — read the src attribute directly instead of
    # serializing each node and regex-matching src="...", which raised
    # IndexError on any <img> without a double-quoted src attribute
    for img in selector.xpath('//img'):
        src = img.get('src')
        if not src:
            # skip <img> tags with no src (e.g. lazy-loaded placeholders)
            continue
        num += 1
        get_img(src, num)
    return num
    
# 程序主入口
if __name__ == '__main__':
    num = 0
    # Fetch the collections index page and extract detail-page links
    html = requests.get('https://new.shuge.org/collections/')
    content = html.text

    reg = re.compile(r'https://new.shuge.org/view/.*?/')
    # findall returns the same URL once per occurrence on the page;
    # dict.fromkeys dedupes while preserving discovery order so each
    # book is scraped exactly once
    urls = list(dict.fromkeys(re.findall(reg, content)))
    # 获取所有数据
    print("→获取图书信息中")
    for url in urls:
        print("正在爬取" + url)
        try:
            num = get_info(url, num)
            print("爬取成功")
        except Exception as e:
            # narrowed from bare 'except:' so Ctrl-C/SystemExit still
            # work; report the reason instead of failing silently
            print("爬取失败", e)
    print("→爬取完成")