#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :拉看网测试.py
# @Time      :2024/5/9 
# @Author    :CL
# @email     :1037654919@qq.com
# https://www.lakanshu.com/book/2851/
import os
import time
import requests
from bs4 import BeautifulSoup


# Fetch the list of chapter links from a book index page
def get_cha_url(url="https://www.lakanshu.com/book/2851/"):
    """Fetch a book index (or paginated index) page and extract chapter links.

    Args:
        url: Book index page URL, e.g. ``https://www.lakanshu.com/book/2851/``
            or a paginated ``https://www.lakanshu.com/index/2851/2/`` page.

    Returns:
        list[dict]: One dict per chapter with keys ``'title'`` (stripped
        anchor text) and ``'href'`` (absolute chapter URL).

    Raises:
        requests.HTTPError: If the server answers with a 4xx/5xx status.
        requests.Timeout: If the server does not respond within the timeout.
    """
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9",
        "cache-control": "no-cache",
        "pragma": "no-cache",
        "priority": "u=0, i",
        "sec-ch-ua": "\"Chromium\";v=\"124\", \"Google Chrome\";v=\"124\", \"Not-A.Brand\";v=\"99\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "none",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"
    }
    cookies = {
        "Hm_lvt_f004e3d6881c9bbc22260590c5ca64c0": "1715237568",
        "Hm_lpvt_f004e3d6881c9bbc22260590c5ca64c0": "1715237593"
    }

    # Timeout prevents the crawler from hanging forever on a dead connection.
    response = requests.get(url, headers=headers, cookies=cookies, timeout=30)
    # Fail fast on an error status instead of scraping an error page.
    response.raise_for_status()

    print(response.url, response)
    soup = BeautifulSoup(response.text, 'html.parser')
    # The last "section-box" div appears to hold the full chapter list
    # (earlier ones presumably list recent chapters) — confirm against the site.
    anchors = soup.find_all("div", class_="section-box")[-1].find_all('a')
    chapters = []
    for a in anchors:
        print(a.text, a['href'])
        # hrefs are site-relative, so prepend the domain to get absolute URLs.
        chapters.append({'title': a.text.strip(),
                        'href': 'https://www.lakanshu.com' + a['href']})
    return chapters

# Fetch the raw HTML of a single chapter page
def get_cha_data(url='https://www.lakanshu.com/read/2851/5439125.html'):
    """Fetch a single chapter page and return its raw HTML.

    Args:
        url: Absolute URL of a chapter page,
            e.g. ``https://www.lakanshu.com/read/2851/5439125.html``.

    Returns:
        str: The page body as text, for the caller to parse.

    Raises:
        requests.HTTPError: If the server answers with a 4xx/5xx status.
        requests.Timeout: If the server does not respond within the timeout.
    """
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9",
        "cache-control": "no-cache",
        "pragma": "no-cache",
        "priority": "u=0, i",
        "sec-ch-ua": "\"Chromium\";v=\"124\", \"Google Chrome\";v=\"124\", \"Not-A.Brand\";v=\"99\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "none",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"
    }
    cookies = {
        "lt_iscookie": "1",
        "Hm_lvt_f004e3d6881c9bbc22260590c5ca64c0": "1715237568",
        "night": "0",
        "fontFamily": "null",
        "fontColor": "null",
        "fontSize": "null",
        "bg": "null",
        "Hm_lpvt_f004e3d6881c9bbc22260590c5ca64c0": "1715238033"
    }
    # Timeout prevents the crawler from hanging forever on a dead connection.
    response = requests.get(url, headers=headers, cookies=cookies, timeout=30)
    # Fail fast on an error status instead of returning an error page body.
    response.raise_for_status()

    print(response.url, response)
    return response.text


def main():  # Download each configured book, chapter by chapter
    """Download every book in the hard-coded list as one .txt file per chapter.

    For each book: walk the paginated chapter index (50 chapters per index
    page — a shorter page means we reached the last one), fetch each chapter,
    extract its paragraph text and write it under the book's output directory.
    """
    books = [
        {'name': '红楼梦', "url": 'https://www.lakanshu.com/book/7528/'},
        {'name': '同时穿越了99个世界', "url": 'https://www.lakanshu.com/book/2851/'},
        {'name': '冷血法医', "url": 'https://www.lakanshu.com/book/7573/'},
        {'name': '我这一辈子 文本 （从我这一辈子开始）', "url": 'https://www.lakanshu.com/book/15444/'}
    ]

    for book in books:  # renamed from `list`, which shadowed the builtin
        url = book['url']
        # URLs end with '/', so split('/') yields a trailing '' and the
        # numeric id sits at index -2.
        book_id = url.split('/')[-2]
        path = f'/home/chenglei3/work/data/拉看网/{book["name"]}'  # output dir — TODO make configurable
        page = 1
        os.makedirs(path, exist_ok=True)
        while True:
            print('page', page)
            # Page 1 lives under /book/<id>; later pages under /index/<id>/<n>/.
            if page == 1:
                url = f'https://www.lakanshu.com/book/{book_id}'
            else:
                url = f'https://www.lakanshu.com/index/{book_id}/{page}/'
            cha_urls = get_cha_url(url)
            for cha_url in cha_urls:
                cha_data = get_cha_data(cha_url['href'])
                soup = BeautifulSoup(cha_data, 'html.parser')
                content = soup.find("div", class_="content")
                if content is None:
                    # Malformed/blocked chapter page — skip instead of crashing.
                    print('no content div, skipping', cha_url['href'])
                    continue
                text = ''
                for p in content.find_all('p'):
                    text += p.text + '\n'
                # Replace characters that are illegal in filenames (a '/' in a
                # chapter title would otherwise break open()).
                safe_title = ''.join(
                    c if c not in '\\/:*?"<>|' else '_' for c in cha_url["title"])
                with open(f'{path}/{safe_title}.txt', 'w', encoding='utf-8') as f:
                    f.write(text)
            # Index pages hold 50 chapters; a shorter page is the last one.
            if len(cha_urls) < 50:
                break
            page += 1
            time.sleep(10)  # back off between index pages to avoid an IP ban

# Run the downloader only when executed as a script (not when imported).
if __name__ == '__main__':

    main()

