import requests
import os
from bs4 import BeautifulSoup
import threading
import time

'''
Reference article on multi-threaded scraping:
https://blog.csdn.net/qq_40695895/article/details/79645237?spm=1001.2014.3001.5502
'''

class gettext(threading.Thread):
    """Worker thread that pops [index, url] pairs from a shared chapter
    list and downloads each chapter's text (this site splits every
    chapter across two pages)."""

    def __init__(self, chapter_list, book, lock, folock, re_header):
        threading.Thread.__init__(self)
        self.chapter_list = chapter_list  # shared work queue of [index, url]
        self.book = book                  # shared storage object for results
        self.lock = lock                  # guards chapter_list
        self.folock = folock              # guards book
        self.re_header = re_header        # HTTP request headers
        self.exitflag = False

    def run(self):
        while not self.exitflag:
            # Lock the shared work queue before taking an item.
            self.lock.acquire()
            if len(self.chapter_list) != 0:
                data = self.chapter_list.pop()
                # Release the queue as soon as we hold our [index, url].
                self.lock.release()

                # Fetch page 1 of the chapter, retrying until HTTP 200.
                # BUG FIX: the original passed the module-level re_header
                # via params= (sent the headers as a query string); it must
                # go through headers=, and use the instance's own copy.
                while 1:
                    try:
                        r = requests.get(data[1], headers=self.re_header, timeout=2)
                        # Break out once the page is fetched successfully.
                        if r.status_code == 200:
                            break
                    except requests.RequestException:
                        # Transient network error: back off briefly, retry.
                        time.sleep(0.1)
                        continue
                r.encoding = 'gbk'
                soup = BeautifulSoup(r.text, "html.parser")

                # Chapter title and the first half of the body.
                title = soup.select('.nr_title')[0].text
                content_1 = soup.select('#nr1')[0].text

                time.sleep(0.1)
                # Fetch page 2 (same URL with a "_2" suffix), same retry loop.
                while 1:
                    try:
                        r = requests.get(data[1].replace('.html', '_2.html'), headers=self.re_header, timeout=2)
                        if r.status_code == 200:
                            break
                    except requests.RequestException:
                        time.sleep(0.1)
                        continue
                r.encoding = 'gbk'
                soup = BeautifulSoup(r.text, "html.parser")

                # Second half of the chapter body.
                content_2 = soup.select('#nr1')[0].text

                # Join the two halves into the full chapter text.
                chapter_content = ''.join([content_1, content_2])

                self.folock.acquire()
                # Store the finished chapter under the storage lock.
                self.book.put(data[0], title, chapter_content)
                print(title)
                self.folock.release()

            else:
                # Queue drained: exit the thread, releasing the lock first.
                self.exitflag = True
                self.lock.release()


# 定义一个storage类用来暂时储存小说内容
class storage:
    def __init__(self):
        self.content = []

    def put(self, index, title, content):
        self.content.extend([[index, title, content]])


# Target book and the site's mobile endpoints.
book_id = '36282'
web_url = 'http://m.50zw.la'
url = '{0}/chapters_{1}/'.format(web_url, book_id)
book_url = '{0}/book_{1}'.format(web_url, book_id)
# Headers that make each request look like a mobile-browser visit.
re_header = {
    'Referer': 'http://m.50zw.la/',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Mobile Safari/537.36'
}

'''*******************************获取小说信息********************************'''

# Download the book's landing page.
r = requests.get(book_url, headers=re_header)
r.encoding = 'gbk'
soup = BeautifulSoup(r.text, "html.parser")

# Pull the metadata fields out of the info panel (one lookup, indexed).
book_name = soup.select('.info p strong')[0].text
info_lines = soup.select('.info p')
book_author = info_lines[1].text
book_type = info_lines[2].text
book_status = info_lines[3].text
book_lastest = info_lines[4].text
book_intro = soup.select('.intro')[0].text

print('名称：' + book_name)
print(book_author)
print('简介：' + book_intro + '\n')

'''***************************获取章节列表**********************************'''
print('\n正在获取章节列表')
chapter_list = []
i = 1
while 1:
    # Fetch chapter-index page i and parse it.
    # BUG FIX: the original sent re_header via params= (turned the
    # headers into query-string values); they must go through headers=,
    # matching the other requests in this script.
    r = requests.get(url + str(i), headers=re_header)
    r.encoding = 'gbk'
    soup = BeautifulSoup(r.text, "html.parser")

    i += 1

    # Collect the hrefs on this page; the first link is page navigation,
    # not a chapter, so drop it — but only if anything was found at all
    # (the original unconditional `del temp_list[0]` raised IndexError
    # on an empty page).
    temp_list = [a['href'] for a in soup.select('.last9 a')]
    if temp_list:
        del temp_list[0]
    # An empty result means we walked past the last index page.
    if len(temp_list) == 0:
        break
    chapter_list.extend(temp_list)

# Number each chapter and make its URL absolute: [index, absolute_url].
for i in range(len(chapter_list)):
    chapter_list[i] = [i, 'http://m.50zw.la' + chapter_list[i]]

# Reverse so worker threads pop() the earliest chapters first.
chapter_list.reverse()

print('获取章节列表完成\n')

'''*************************************线程*******************************'''

print('创建线程')
# Shared store that workers deposit finished chapters into.
book = storage()
# Keep a handle on every worker so we can join them afterwards.
threads = []

# One lock guards the work queue, the other guards the storage object.
lock = threading.Lock()
folock = threading.Lock()

# Spawn 100 worker threads that drain chapter_list concurrently.
for _ in range(100):
    worker = gettext(chapter_list, book, lock, folock, re_header)
    threads.append(worker)
    worker.start()

# Block until every worker has finished.
for worker in threads:
    worker.join()

print('线程结束')
'''***********************************处理与写入*************************'''
print('正在处理数据')
# Bucket array: slot i will hold chapter i's [index, title, content].
# BUG FIX: the original placeholder [['a', 'a']] had only two elements,
# so any chapter that failed to download crashed the writer at t[2];
# use a 3-element placeholder instead.
novel = [[0, '', '']] * len(book.content)

for t in book.content:
    index = t[0]
    # Chapters finished out of order; drop each into its numbered slot.
    novel[index] = t

time.sleep(2)
print('准备下载')
time.sleep(2)
print('开始下载')

# Write the book as UTF-8 bytes; the with-block guarantees the file is
# closed even if a write fails (the original could leak the handle).
with open(book_name + '.txt', 'wb') as fo:
    # Book metadata header.
    fo.write(('名称：' + book_name + '\n').encode('utf-8'))
    fo.write((book_author + '\n').encode('utf-8'))
    fo.write((book_type + '\n').encode('utf-8'))
    fo.write((book_status + '\n').encode('utf-8'))
    fo.write((book_lastest + '\n').encode('utf-8'))
    fo.write(('简介：\n' + book_intro + '\n').encode('utf-8'))

    # Chapters in index order.
    for t in novel:
        title = t[1]
        chapter_content = t[2]

        fo.write(title.encode('utf-8'))
        fo.write(chapter_content.encode('utf-8'))

        # Progress note (title minus its trailing page marker).
        print(title[0:-2] + '已下载')

print(book_name, '下载成功')
