#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :nlc_bookname.py
# @Time      :2023/10/13 
# @Author    :CL
# @email     :1037654919@qq.com
import random
import time
from retrying import retry
import requests
from bs4 import BeautifulSoup
import math

from douban import USER_AGENT_LIST
from utils import mongo_manager

# Mongo collection handle: scraped book records are written to the
# "nlc_book_name" collection in the "wx_read" database.
nlc_book_name = mongo_manager('nlc_book_name', db='wx_read')

# Shared request headers. The User-Agent is picked once at import time.
# NOTE(review): the Referer points at movie.douban.com — this header block
# looks copied from the douban crawler; presumably read.nlc.cn ignores it,
# but verify it is intentional.
headers = {
    "Accept": "*/*",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Connection": "keep-alive",
    "Referer": "https://movie.douban.com/typerank?",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-origin",
    "User-Agent": random.choice(USER_AGENT_LIST),
    "X-Requested-With": "XMLHttpRequest",
    "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\""
}
# Cookie jar sent with every request. Every entry is currently commented
# out, so an empty dict (no cookies) is sent; the values below are kept
# only as a reference for re-enabling a logged-in douban session.
cookies = {
    # "bid": "IoWM2l63OaQ",
    # "_pk_id.100001.4cf6": "bfbb85ec3b2f9171.1693212023.",
    # "__yadk_uid": "pYhiu3ZErcq9rDud6uK3t6JzWmc43z39",
    # "ll": "\"118173\"",
    # "_ga": "GA1.1.1675583776.1693212024",
    # "_ga_RXNMP372GL": "GS1.1.1694317804.3.0.1694317804.60.0.0",
    # "ap_v": "0,6.0",
    # "_pk_ref.100001.4cf6": "%5B%22%22%2C%22%22%2C1696923072%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DJYQ6eBeq_jFR2myslmXH4riITyKYsa78nC_qTWlMi5x5XP3lC721vnoZE37g-E4msq2DOpLOpAPPjxuB8Lz4519VDFCjTszzzH-vmTMmbzu%26wd%3D%26eqid%3D8f289cca00000572000000066524fdb4%22%5D",
    # "_pk_ses.100001.4cf6": "1",
    # "__utma": "223695111.25768331.1693212024.1693212024.1696923072.2",
    # "__utmc": "223695111",
    # "__utmz": "223695111.1696923072.2.2.utmcsr=baidu|utmccn=(organic)|utmcmd=organic",
    # "__utmb": "30149280.4.10.1696923072",
    # "_pbjs_userid_consent_data": "3524755945110770",
    # "trc_cookie_storage": "taboola%2520global%253Auser-id%3Dfa2dcbc4-a26f-4801-b5ab-c3f67598bfb9-tuctc1e836f",
    # "_vwo_uuid_v2": "DA51A6B993D05A1420990E7FB770C589F|caf518610cd157b502246880d7d9ce45",
    # "cto_bundle": "m0Pu_F92a0FHeGx2N2lFRVVNakNuQ3hCVk1heG1lR3Q0TzNVNENKVSUyQkhRTmNZS2VUalFweTdwU1d5TGZ1WWRkJTJCSzBLc3FLNTV0cHZ4Y3lIJTJGUXhOSFo1Y3pTOGFkQUM0eEVPZXFFUTNZTTFReDM4M1pSV0ZURlZVQWklMkJNZFNDQ1VneW9DNHRIWnllaHRpanU0MjVaa3ZLaFI5dyUzRCUzRA",
    # "cto_bidid": "BgvXB19US2N3VnVWdSUyRk9lNGo4cXkxUER6RVhrdkhIWTg5Z0htYVhERnZwUVdHT3FlbE1KbmQlMkJvSGIlMkY2bFVlYW96Y082MFV0cEJsRkpNOTUwY2FFajJJNEJQJTJGSUR1NUNZZ2dnWFlmSiUyRmliN0I2azglM0Q",
    # "__gads": "ID=a186e19481dbd354-2220b5a030e30074:T=1696923106:RT=1696925266:S=ALNI_MZj0TQWjVO7K6H4Pt6vmVqubqp7jA",
    # "__gpi": "UID=00000a176f85104d:T=1696923106:RT=1696925266:S=ALNI_MZxxF-rEMhKk8Io26Z_eZqPXaXq9A"
}
def get_proxy():
    """Build a requests-style proxy mapping with a randomly chosen port.

    The proxy provider exposes ports 24000-24400; picking one at random on
    every call rotates the exit IP between requests.

    Returns:
        dict: ``{'http': url, 'https': url}`` pointing at the same endpoint.
    """
    # Fixed: dropped the redundant function-local `import random` that
    # shadowed the module-level import at the top of the file.
    # NOTE(review): credentials are hard-coded — consider moving to config.
    port = random.randint(24000, 24400)
    proxy_url = f'http://zheng123:zheng123@wz1.iinti.cn:{port}'
    return {'http': proxy_url, 'https': proxy_url}
@retry(stop_max_attempt_number=5)
def get_nlc(
            url="http://read.nlc.cn/allSearch/searchList?searchType=1&showType=1&pageNo=20&searchWord=&classification=A"):
    """Fetch one NLC search-list page and return its HTML body.

    Args:
        url: Search-list URL; the default points at classification "A".

    Returns:
        str: The response text when the server answers HTTP 200.

    Raises:
        RuntimeError: On any non-200 status. ``@retry`` re-invokes the
            function up to 5 times before the error propagates.
    """
    response = requests.get(url, headers=headers, cookies=cookies, proxies=get_proxy())
    print(response)
    if response.status_code == 200:
        return response.text
    # Back off briefly so the retry (and fresh random proxy) has a chance.
    time.sleep(5)
    # Bug fix: the original `raise f'...'` raised a plain string, which is
    # itself a TypeError in Python 3 (exceptions must derive from
    # BaseException). Wrap the message in a real exception instead.
    raise RuntimeError(f'请求状态错误：{response.status_code}')
def parse_class_name():
    """Parse the hard-coded NLC category table into (url, label) pairs.

    The HTML snippet below is a saved copy of the site's classification
    navigation table, so the category list is fixed at 22 entries.

    Returns:
        list[list[str]]: ``[absolute_url, label]`` per category, where the
        label still carries the count suffix, e.g. ``"哲学、宗教(393)"``.
    """
    tables = """
    <table class="YMH2019_New_middleNav">
    			<tbody><tr>
    				<td class="td2">图书分类</td>
    				<td>
    					<div class="list3 list_hide Z_clearfix">
    						<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=A">马克思主义、列宁主义、毛泽东思想、邓小平理论<span class="num">(233)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=B">哲学、宗教<span class="num">(393)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=C">社会科学总论<span class="num">(165)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=D">政治、法律<span class="num">(1033)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=E">军事<span class="num">(42)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=F">经济<span class="num">(508)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=G">文化、科学、教育、体育<span class="num">(424)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=H">语言、文字<span class="num">(174)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=I">文学<span class="num">(2266)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=J">艺术<span class="num">(476)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=K">历史、地理<span class="num">(292)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=N">自然科学总论<span class="num">(53)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=O">数理科学和化学<span class="num">(43)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=P">天文学、地球科学<span class="num">(45)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=Q">生物科学<span class="num">(57)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=R">医药、卫生<span class="num">(706)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=S">农业科学<span class="num">(203)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=T">工业技术<span class="num">(276)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=U">交通运输<span class="num">(80)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=V">航空、航天<span class="num">(7)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=X">环境科学、安全科学<span class="num">(19)</span></a></label>
    								<label><a href="/allSearch/searchList?searchType=1&amp;showType=1&amp;pageNo=1&amp;searchWord=&amp;classification=Z">综合性图书<span class="num">(186)</span></a></label>
    								</div>
    					<div class="more">
    						<a>显示更多 ↓</a>
    					</div>
    				</td>
    			</tr>
    		</tbody></table>
    """
    soups = BeautifulSoup(tables, 'lxml')
    datas = soups.find('div', class_='list3 list_hide Z_clearfix').find_all('a')
    class_name = []
    for data in datas:
        # Bug fix: the hrefs already start with '/', so the base must not
        # end with '/' (the original produced 'http://read.nlc.cn//...').
        href = 'http://read.nlc.cn' + data['href']
        name = data.get_text()
        class_name.append([href, name])
    return class_name
def main():
    """Scrape the default NLC search-list page and upsert each book into Mongo.

    Fetches the default URL via :func:`get_nlc`, extracts every list item's
    title and description, and writes ``{_id, bookname, info}`` documents,
    falling back to an update when the ``_id`` already exists.
    """
    res = get_nlc()
    print(res)
    soups = BeautifulSoup(res, 'lxml')
    datas = soups.find('ul', class_='YMH2019_New_BSLW_List1 Z_clearfix').find_all('li')
    for data in datas:
        print()
        # Bug fix: the original stored the bs4 Tag object itself as
        # _id/bookname; use its text, consistent with the __main__ loop.
        tt = data.find('span', class_='tt').text
        print(tt)
        txt = data.find('div', class_='txt').get_text()
        print(txt)
        insert_data = {'_id': tt, 'bookname': tt, 'info': txt}
        try:
            nlc_book_name.insertOne(insert_data)
        except Exception:
            # Duplicate _id: fall back to updating the existing document.
            nlc_book_name.updateOne({'_id': insert_data["_id"]}, insert_data)

if __name__ == "__main__":
    print()
    # Crawl every classification: for each category parsed from the saved
    # navigation table, page through its search results and upsert every
    # book entry into Mongo.
    class_name = parse_class_name()
    for class_name_ in class_name:
        href = str(class_name_[0])
        # Label looks like "哲学、宗教(393)": split off the name and count.
        name = str(class_name_[1]).split('(')[0]
        num = int(str(class_name_[1]).split('(')[-1].split(')')[0])
        print(href, name, num)
        # Assumes 12 results per page — TODO confirm against the site.
        # Bug fix: `+ 1` so range() includes the final page; the original
        # `range(1, ceil(num/12))` skipped the last page and fetched
        # nothing at all for categories with num <= 12.
        for pageNo in range(1, math.ceil(num / 12) + 1):
            url = ('http://read.nlc.cn/allSearch/searchList?searchType=1&showType=1&pageNo='
                   + str(pageNo) + '&searchWord=&classification=' + href.split('=')[-1])
            print(url)
            res = get_nlc(url=url)
            soups = BeautifulSoup(res, 'lxml')
            datas = soups.find('ul', class_='YMH2019_New_BSLW_List1 Z_clearfix').find_all('li')
            for data in datas:
                try:
                    tt = data.find('span', class_='tt').text
                    txt = data.find('div', class_='txt').get_text()
                    print(tt, txt)
                    insert_data = {'_id': tt, 'bookname': tt, 'class_name': name,
                                   'class_num': num, 'class_url': href, 'info': txt}
                    try:
                        nlc_book_name.insertOne(insert_data)
                    except Exception:
                        # Duplicate _id: update the existing document.
                        nlc_book_name.updateOne({'_id': insert_data["_id"]}, insert_data)
                except Exception as e:
                    # Items missing the expected span/div are logged, not fatal.
                    print(e)