import string
from urllib.request import Request,urlopen
from bs4 import BeautifulSoup
import requests
import re
import time
import pandas as pd
import random
class novelItem():
    """Container for one scraped novel's metadata: name, URL and abstract."""

    def __init__(self):
        # NOTE(review): 'nuvel_abtract' is misspelled, but the name is part of
        # the public attribute interface and is kept for compatibility.
        self.novel_name = ''
        self.novel_url = ''
        self.nuvel_abtract = ''

    def update_data(self, name, url):
        """Set the novel's display name and page URL in one call."""
        self.novel_name = name
        self.novel_url = url
def get_urlAndname():
    """Crawl every page of the xbiquge category listing, collect each novel's
    name and URL (de-duplicated by name), and write the result to
    'biquge_novels_xh.csv'.

    Side effects: network requests to www.xbiquge.la, console progress output,
    and one CSV file written in the working directory.
    """
    novel_df = pd.DataFrame(columns=['name', 'url'])
    url = 'http://www.xbiquge.la/fenlei/1_1.html'
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'}
    novels = []
    seen_names = set()  # O(1) membership test for de-duplication
    while True:
        html = requests.get(url, headers=headers).text
        # .text is always a str (never None); an empty body means a bad response.
        if not html:
            print('html is none')
            break
        bs_obj = BeautifulSoup(html, 'lxml')
        boxs = bs_obj.find('div', {'class': 'l'})
        if boxs is None:
            print('boxs is none')
            break
        pg = boxs.find('div', {'class': 'page_b'})
        # find_all returns [] (never None) when nothing matches.
        spans = boxs.find_all('span', {'class': 's2'})
        if not spans:
            print('tbodys is none')
            break
        for span in spans:
            a_novel = span.find('a')
            novel = novelItem()
            novel.novel_name = a_novel.get_text()
            novel.novel_url = a_novel.attrs['href']
            if novel.novel_name not in seen_names:
                seen_names.add(novel.novel_name)
                novels.append(novel)
                # Pad short names with dashes so console output lines up.
                padding = '-' * max(0, 20 - len(novel.novel_name))
                print('----', len(novels), '----', novel.novel_name, padding)
        # Guard against a missing pager div (pg may be None).
        a_nxt = pg.find('a', {'class': 'next'}) if pg is not None else None
        if a_nxt is None:  # no "next" link: this is the last page
            print('this is the last page')
            break
        url = a_nxt.attrs['href']
        time.sleep(random.randint(1, 3))  # throttle to be polite to the server
    for i, novel in enumerate(novels):
        novel_df.loc[i, 'name'] = novel.novel_name
        novel_df.loc[i, 'url'] = novel.novel_url
    novel_df.to_csv('biquge_novels_xh.csv')
def get_abstract(start=18106):
    """Fetch each novel's abstract page and write the cleaned abstract text
    into the 'abtract' column of 'abooky_novels_(有简介).csv', checkpointing
    the CSV after every row so the crawl can be resumed.

    Parameters
    ----------
    start : int
        Row index to resume from (default 18106 preserves the original
        hard-coded resume point).

    Side effects: network requests, console progress output, and the CSV file
    rewritten after each processed row.
    """
    df = pd.read_csv('abooky_novels_(有简介).csv')
    df.set_index(['Unnamed: 0'], inplace=True)
    # NOTE(review): assigning DEFAULT_RETRIES does not actually configure
    # retries for a Session; mounting an HTTPAdapter(max_retries=5) would —
    # kept as-is to preserve behavior, but verify.
    requests.adapters.DEFAULT_RETRIES = 5
    s = requests.session()
    s.keep_alive = False  # avoid pooling connections across many requests
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'}
    # Fragments stripped from every abstract, in order: &nbsp;, half-width
    # space, full-width space, CR, LF, then boilerplate headings/footers.
    junk = (u'\xa0', ' ', chr(12288), '\r', '\n',
            '【内容简介】：', '【内容简介】', '内容简介：', '内容简介',
            '游客，如果您要查看本帖隐藏内容请回复')
    for i in range(start, len(df)):
        url = df.loc[i, 'url']
        # .text is always a str and BeautifulSoup() never returns None,
        # so the original None checks on them were dead code.
        html = s.get(url, headers=headers, timeout=5).text
        bs_obj = BeautifulSoup(html, 'lxml')
        boxs = bs_obj.find('div', {'class': 't_fsz'})
        if boxs is None:  # post-body container missing on this page
            print('boxs is none')
            time.sleep(random.randint(1, 3))
            continue
        fonts = boxs.find_all('font')
        if fonts:
            visited_text = []
            abtracts = ''
            for font in fonts:
                single_text = font.get_text()
                for fragment in junk:
                    single_text = single_text.replace(fragment, '')
                if single_text not in visited_text:  # skip duplicated fragments
                    visited_text.append(single_text)
                    abtracts += single_text
            # Column name typo ('abtract') kept on purpose: linshi() migrates it.
            df.loc[i, 'abtract'] = abtracts
            print('--', i, '--', df.loc[i, 'name'], '当前小说简介已录入')
        else:
            print('--', i, '--', df.loc[i, 'name'], "当前小说没有简介")
        time.sleep(0.5)  # gentle throttle between pages
        df.to_csv('abooky_novels_(有简介).csv')  # checkpoint progress each row
def linshi():
    """One-off migration: copy the misspelled 'abtract' column into a properly
    named 'abstract' column, drop the old column, and persist the result.

    Reads and rewrites 'abooky_novels_(有简介).csv' in the working directory.
    """
    df = pd.read_csv('abooky_novels_(有简介).csv')
    df.set_index(['Unnamed: 0'], inplace=True)
    for i in range(len(df)):
        value = df.loc[i, 'abtract']
        # Guard with notna: empty CSV cells come back as NaN, and the original
        # NaN != '' comparison copied NaN values into the new column.
        if pd.notna(value) and value != '':
            df.loc[i, 'abstract'] = value
    df.drop(axis=1, columns=['abtract'], inplace=True)
    df.columns = ['name', 'url', 'abstract']
    # Fix: the migrated DataFrame was previously discarded (never saved).
    df.to_csv('abooky_novels_(有简介).csv')
# get_abstract()
# Guard the entry point so importing this module does not trigger the crawl.
if __name__ == '__main__':
    linshi()