import urllib3
from html.parser import HTMLParser
import time

# --- Scraper configuration and shared state ---
charset = "utf-8"                               # encoding used to decode fetched pages
indexlist = 'https://www.biqugg.com/xs/226/'    # novel index page (used by the commented-out list fetch)
url = ""                                        # base URL prefixed to every chapter link
clist = []                                      # chapter URLs, loaded from temp.list.txt
fl = False                                      # parser flag: True while inside the chapter-content element

# Load the pre-fetched chapter list. Abort on failure instead of falling
# through with an undefined handle (the original printed an error and then
# crashed with a NameError on listfile.readlines()).
try:
    with open("temp.list.txt", "r") as listfile:
        clist = [line.replace("\n", "") for line in listfile]
except OSError:
    print("load List file error!")
    raise SystemExit(1)
print(clist)

http = urllib3.PoolManager()

# Output file for the assembled book; written chapter by chapter in the main loop.
file = open("temp.txt", "wb+")
#print(f.data.decode('gb2312', 'replace'))
class getlist(HTMLParser):
    """Collects chapter links from the novel's index page.

    Hrefs are harvested from <a> tags whose text occurs between the element
    carrying id="list" (start marker) and the element carrying id="footer"
    (end marker).  The collected links end up in ``self.clist``.
    """

    def __init__(self):
        # Per-instance state; the original used shared mutable class
        # attributes, which leak between instances.
        super().__init__()
        self.list = []    # stack of currently-open tag names
        self.clist = []   # collected href values of chapter links
        self.attrs = []   # attrs of the most recent start tag that had any
        self.f = False    # True while between the start and end markers
        self.text = ''

    def handle_starttag(self, tag, attrs):
        # Remember the last non-empty attribute list so handle_data can
        # inspect the enclosing element's attributes.
        if attrs != []:
            self.attrs = attrs
        self.list.append(tag)

    def handle_endtag(self, tag):
        # Guard against malformed HTML closing more tags than were opened
        # (the sibling parser class already guards this).
        if self.list:
            self.list.pop()

    def handle_data(self, data):
        if ('id', 'list') in self.attrs:    # start-of-list marker
            self.f = True
        if ('id', 'footer') in self.attrs:  # end-of-list marker
            self.f = False
        # Text inside an <a> while between the markers: record its href
        # (assumed to be the first attribute of the tag).
        if self.f and self.list and self.list[-1] == "a":
            self.clist.append(self.attrs[0][1])




class MyHTMLParser(HTMLParser):
    """Extracts one chapter's title and body text from a chapter page.

    The <h1> text becomes the chapter title and resets ``self.text``; all
    data between the element with id="content" and the element with
    class="page_chapter" is appended as the chapter body.  The result
    accumulates in ``self.text`` and is written out by the main loop.
    """

    def __init__(self):
        super().__init__()
        self.list = []    # stack of currently-open tag names
        self.flags = []   # attrs of the most recent start tag that had any
        self.text = ''    # accumulated chapter text
        # Instance flag replacing the original module-level global `fl`:
        # True while inside the chapter-content region.  Purely local
        # parser state, so it does not belong at module scope.
        self.fl = False

    def handle_starttag(self, tag, attrs):
        # Remember the last non-empty attribute list so handle_data can
        # inspect the enclosing element's attributes.
        if attrs != []:
            self.flags = attrs
        self.list.append(tag)

    def handle_endtag(self, tag):
        # Guard against malformed HTML closing more tags than were opened.
        if not self.list:
            return
        self.list.pop()

    def handle_data(self, data):
        if not self.list:
            return

        if self.list[-1] == 'h1':  # chapter title: start a fresh chapter
            self.text = "\n" + data + "\n"
            print(data)

        if ('id', 'content') in self.flags:          # start-of-body marker
            self.fl = True
        if self.fl:
            self.text = self.text + data
        if ('class', 'page_chapter') in self.flags:  # end-of-body marker
            self.fl = False

parser = MyHTMLParser()

# One-off helper, kept for reference: rebuild the chapter list from the
# index page instead of loading it from temp.list.txt.
#   f = http.request('get', indexlist)
#   lister = getlist()
#   lister.feed(f.data.decode(charset, 'replace'))
#   clist = lister.clist

begintime = time.time() * 1000
zj = 1
total = len(clist)
# enumerate() replaces the original clist.index(index) lookups, which were
# O(n) per iteration and wrong when the list contains duplicate URLs.
for num, index in enumerate(clist, 1):
    btime = time.time() * 1000
    print(index, "(", num, "/", total, ")")
    f = http.request('get', url + index)
    time.sleep(1)  # throttle: be polite to the server
    parser.feed(f.data.decode(charset, 'replace'))
    file.write(parser.text.encode('utf8'))
    tt = time.time() * 1000
    t1 = (tt - begintime) / 1000          # total elapsed, seconds
    t2 = tt - btime                       # this chapter, milliseconds
    t3 = (tt - begintime) / (num * 1000)  # average per chapter, seconds
    # Remaining chapters is total - num; the original counted the chapter
    # just finished as well (off by one).
    t4 = (total - num) * t3               # estimated remaining time, seconds
    print("总耗时：%.3f秒  本次耗时：%.2f毫秒  平均耗时：%.3f秒" % (t1, t2, t3))
    print("预计剩余时间：%d分%.3f秒\n" % (int(t4 / 60), t4 - int(t4 / 60) * 60))
    zj += 1
    time.sleep(0.5)

# Flush the assembled book to disk (the original never closed the file).
file.close()
