# -*- coding: utf-8 -*-
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#作者：cacho_37967865
#博客：https://blog.csdn.net/sinat_37967865
#文件：getWebTxt.py
#日期：2019-07-07
#备注：Python爬虫爬取小说
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

import requests
import random
import time
import math
from bs4 import BeautifulSoup
from pacong.txtdeal.base import deal_replace,deal_title,get_title
from pycacho.cachobase.file_deal import an_save_txt,a_save_txt,ann_save_txt
from pycacho.cachobase.deal_replace import replace_one_list
from pycacho.cachobase.logger import Logger
import re

logger = Logger("getWebTxt").get_log()

# Pool of desktop User-Agent strings; one is picked at random per run.
# FIX: the original entries had ",'Accept-Language':'zh-CN,zh;q=0.9'" fused
# onto the end of the User-Agent value. That fragment is a separate HTTP
# header, so it is sent via the headers dict below instead.
agent = [
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
]

headers = {
    'User-Agent': random.choice(agent),
    'Accept-Language': 'zh-CN,zh;q=0.9',
}

# When requests.get(url, verify=False) is used to skip SSL verification,
# urllib3 floods the log with InsecureRequestWarning; the four lines below
# silence those warnings, disable keep-alive and bound the retry count.
session = requests.Session()
session.keep_alive = False
requests.adapters.DEFAULT_RETRIES = 5
requests.packages.urllib3.disable_warnings()

# Target novel-mirror sites (comments translated from the original Chinese).
itsh_url = 'https://www.itshang.com'   # novel site
dizi_url = 'https://www.dizishu.com'   # "dizi" novel site
lqzw_url = 'https://www.luoqiuzw.com'  # Luoqiu Chinese (not working)
zwla_url = 'http://m.42zw.la'          # book
bqge_url = 'https://m.bqgee.com'       # book
nnyq_url = 'https://www.xxyanqing5.com'
zwcc_url = 'http://www.42zw.cc'
#diyi_url = 'http://m.diyibanzhu.store'
jjsw_url = 'https://www.43tg.com'
po18_url = 'https://m.po18h.com'
dybz_url = 'https://www.6yydstxt226.com'



# 获取 bqge_url 某个小说所有章节 (book_url:页面元素地址，story_url：小说地址，keyword：关键字)
def get_book_chapter(story_url, keyword):
    """Collect every chapter URL of one novel from a bqge-style index page.

    story_url: URL of the novel's chapter-index page.
    keyword:   path fragment a chapter href must contain (e.g. '/as/48726/').
    Returns a list of absolute chapter URLs in page order.
    """
    resp = requests.get(story_url, headers=headers, timeout=20, verify=False)
    resp.encoding = 'utf-8'
    html = resp.text
    soup = BeautifulSoup(html, 'html5lib')  # the "lxml" parser drops data on these pages
    chapter_link = []
    for chapter in soup.find_all('a'):
        href = chapter.get('href')
        # <a> tags without an href attribute yield None; test explicitly
        # instead of swallowing the TypeError with a bare except.
        if href and keyword in href and '.html' in href:
            # hrefs are presumably scheme-relative ('//host/...'), so the
            # scheme is prefixed — TODO confirm against the live site.
            chapter_link.append('https:' + href)
    return chapter_link

def get_book_detail(out_txt, url, id_value):
    """Download one chapter page and append its title and body to out_txt.

    out_txt:  path of the output text file.
    url:      chapter page URL.
    id_value: HTML id of the element holding the chapter body.
    """
    response = requests.get(url, headers=headers, timeout=60, verify=False)
    response.encoding = 'utf-8'
    # Turn double <br> runs into newlines before parsing.
    page = response.text.replace('<br /><br />', '\n').replace('<br><br>', '\n')
    document = BeautifulSoup(page, 'html5lib')  # the "lxml" parser loses data here
    # Normalise the title: drop spaces, collapse a duplicated chapter marker,
    # then put exactly one space after the marker.
    raw_title = document.h1.get_text()
    title = raw_title.replace(' ', '').replace('章章', '章').replace('章', '章 ')
    a_save_txt(out_txt, title)
    body = document.find(id=id_value).get_text()
    print(title)
    a_save_txt(out_txt, body)


# 获取某个小说所有章节 (story_url：小说地址，keyword：关键字)
def get_myBook_chapter(story_url, keyword):
    """Collect every chapter link of one novel from a dizishu-style index page.

    story_url: URL of the novel's chapter-index page (GBK-encoded).
    keyword:   kept for interface compatibility; not used for filtering here.
    Returns a list of [relative_href, link_text] pairs in page order.
    """
    resp = requests.get(story_url, headers=headers, timeout=20, verify=False)
    resp.encoding = 'gbk'
    html = resp.text
    soup = BeautifulSoup(html, 'html5lib')  # the "lxml" parser drops data on these pages
    chapter_link = []
    for chapter in soup.find_all('a'):
        href = chapter.get('href')
        # <a> tags without an href attribute yield None; test explicitly
        # instead of swallowing the TypeError with a bare except.
        if href and '.html' in href:
            chapter_link.append([href, chapter.text])
    return chapter_link

def get_myBook_detail(out_txt,url,title):
    """Download one dizishu-style chapter and append title + body to out_txt.

    The chapter body is embedded in the page as a JavaScript variable
    (cctxt='...';) rather than plain HTML, so it is pulled out with regexes
    and de-obfuscated via the project's deal_replace helper.

    out_txt: path of the output text file.
    url:     chapter page URL (GBK-encoded).
    title:   chapter title written before the body.
    """
    resp = requests.get(url, headers=headers, timeout=60, verify=False)
    resp.encoding = 'gbk'
    html = resp.text
    # Capture the arguments of the page's cctxt.replace(...) calls — the
    # substitutions that deal_replace undoes below.
    # NOTE(review): '.' and the parentheses are unescaped, so '(.*)' is a
    # regex capture group, not a literal call — this appears intentional;
    # confirm against the site's markup before changing.
    cctxt = re.findall(r'cctxt.replace(.*);', html)
    # Extract the raw chapter text from the cctxt='...' assignment and turn
    # double <br> runs into newlines.
    info = re.findall(r'cctxt=\'(.*)\';', html)[0].replace('<br /><br />', '\n').replace('<br><br>', '\n')
    soup = BeautifulSoup(info, 'html5lib').text  # the "lxml" parser loses data here
    content = deal_replace(soup, cctxt)
    print(url,title)
    ann_save_txt(out_txt, title)
    an_save_txt(out_txt,content)


# m.po18h某个小说所有章节
def get_po18_chapter(story_id, story_url):
    """Collect every chapter URL of one m.po18h novel.

    story_id:  numeric novel id as a string (must appear in the href path).
    story_url: URL of the chapter-list page.
    Returns a list of absolute chapter URLs in page order.
    """
    print(story_url)
    resp = requests.get(story_url, headers=headers)
    resp.encoding = 'utf-8'
    soup = BeautifulSoup(resp.text, 'lxml')
    chapter_link = []
    marker = '/' + story_id + '/'  # hoisted: loop-invariant fragment
    for link in soup.find_all('a'):
        href = link.get('href')
        # <a> tags without an href attribute yield None; test explicitly
        # instead of swallowing the TypeError with a bare except.
        if href and '.html' in href and marker in href:
            chapter_link.append(po18_url + href)
    return chapter_link

# m.po18h具体章节内容
def get_po18_detail(out_txt,url):
    resp = requests.get(url, headers=headers)
    resp.encoding = 'utf-8'
    html = resp.text.replace('<br/><br/>','\n')
    soup = BeautifulSoup(html,'html5lib')
    #print(soup)
    title = soup.h1.get_text().replace('\n','').replace('\t','')
    content = soup.find(id="nr1").get_text()
    #print(title)
    #print(content)
    ann_save_txt(out_txt, title)
    an_save_txt(out_txt,content)


#
def main(site):
    story_id = str(48726)   # 48726
    bookid = 4639
    out_txt = 'F:\PythonProject\Python\\' + story_id + '.txt'
    out_title = 'F:\PythonProject\Python\\title.txt'
    chapter_id = str(3375541)  #
    if site == 'book':
        book_url = itsh_url
        if book_url == itsh_url:  # 小说网 https://www.itshang.com/as/12044/
            keyword = '/as/'+story_id + '/'
            story_url = book_url + keyword
            url= get_book_chapter(story_url,keyword)
            n = url.index(book_url + keyword + chapter_id + '.html')     # 初始章节位置
            for i in range(len(url)):
                if i >= n:
                    chapter_url = url[i]
                    print(chapter_url)
                    get_book_detail(out_txt,chapter_url,'content')
                    time.sleep(2)
    if site =='mybook':
        book_url = dizi_url
        if book_url == dizi_url:
            keyword = '/b/' + story_id + '/'
            story_url = book_url + keyword
            url = get_myBook_chapter(story_url, keyword)
            n = url.index([chapter_id + '.html','第一章'])  # 初始章节位置 1317.弃车保帅
            for i in range(len(url)):
                if i >= n:
                    chapter_url = url[i][0]
                    #an_save_txt(out_title, deal_title(url[i][1]))
                    title = get_title(out_title)[i-12]
                    my_url = book_url+ '/files/article/html555/'+str(math.floor(bookid/1000))+'/'+str(bookid)+'/'+chapter_url
                    get_myBook_detail(out_txt,my_url,title)
                    time.sleep(1)

    if site == 'po18':                                # m.po18h
        story_id = str(17150)
        out_txt = 'F:\PythonProject\Python\\'+story_id+'.txt'
        list_url = po18_url + '/novel/list/' + story_id + '/1.html'
        story_url = po18_url + '/novel/' + story_id +'/'
        chapter_id = str(4589726)
        url = get_po18_chapter(story_id,list_url)
        for i in range(len(url)):
            n =  url.index(story_url+chapter_id+'.html')      # 元素位置
            if i >= n:
                print(url[i])
                get_po18_detail(out_txt,url[i])
                #time.sleep(5)


if __name__ == '__main__':
    # Entry point: choose the site family to crawl ('book', 'mybook' or 'po18').
    main('mybook')                      # book,mybook
    #get_diyi_detail('F:\PythonProject\python\\xm11.txt', 'http://m.diyibanzhu.store/wap.php?action=article&id=734143')
    #get_jjsw_detail('F:\PythonProject\Python\\a.txt', 'https://www.43tg.com/52/52987/8557123.html')
