# coding:utf-8

from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

import silentNovel.headersUtil




proxies = { "https": "https://220.172.41.179"}


# Fetch the "most visited" ranking page.
response = requests.get(
    url='http://www.qb5200.com/top/allvisit_1.html',
    #headers=silentNovel.headersUtil.getHeaders(),
    proxies=proxies,
    timeout=20
)
# Use the detected encoding so the Chinese text decodes correctly.
response.encoding = response.apparent_encoding
# Parse the page.
soup = BeautifulSoup(response.text, features='html.parser')

# The ranking is laid out in the first <table> of the page.
top_table = soup.table
if top_table is None:
    # Fail loudly with a clear message instead of an opaque AttributeError
    # on the find_all() call below.
    raise RuntimeError('ranking table not found; page layout may have changed')

# Collect the href of the first <a> in each class="odd" row; rows without
# a link are skipped. (Comprehension replaces the manual append loop.)
novelList = [
    row.find('a').get('href')
    for row in top_table.find_all(class_="odd")
    if row.find('a') is not None
]

# Fetch each novel's chapter index, then print every chapter's text.
# The [0:1] slice deliberately limits the crawl to the first novel.
for novel_url in novelList[0:1]:
    response = requests.get(
        url=novel_url,
        #headers=silentNovel.headersUtil.getHeaders()
        proxies=proxies,
        timeout=20  # consistent with the ranking request; avoids hanging forever
    )
    response.encoding = response.apparent_encoding
    soup = BeautifulSoup(response.text, features='html.parser')

    # Chapter links live inside the element with class "a".
    chapter_box = soup.find(class_="a")
    if chapter_box is None:
        # Skip this novel rather than crash the whole crawl on an
        # unexpected page layout.
        continue

    for chapter_a in chapter_box.find_all('a'):
        # urljoin resolves relative AND absolute-path hrefs correctly,
        # unlike plain string concatenation (which breaks on hrefs
        # starting with '/').
        chapter_url = urljoin(novel_url, chapter_a.get('href'))
        response = requests.get(
            url=chapter_url,
            #headers=silentNovel.headersUtil.getHeaders()
            proxies=proxies,
            timeout=20
        )
        print(chapter_url)
        response.encoding = response.apparent_encoding
        soup = BeautifulSoup(response.text, features='html.parser')
        # The chapter body sits in the element with id="content".
        content = soup.find(id="content")
        if content is not None:
            content_text = content.get_text()
            print(content_text)
