#Fetch the Tieba (Baidu forum) category directory.
#Category pages only list up to 600 forums; forums beyond that cannot be reached here
#and are supplemented separately by crawling the forum lists that users follow.
import datetime
import time
import csv
from crawler import download
from bs4 import BeautifulSoup
import json
from pymongo import MongoClient
import urllib.parse
# Module-level configuration: API key, crawl target, cache DB and request headers.
appkey = 22080

# Root of all crawled URLs (category paths from Mongo are appended to this).
#url='http://tieba.baidu.com/f/index/forumclass'
baseurl = 'http://tieba.baidu.com'

# Local MongoDB used as the crawl cache / result store.
client = MongoClient('localhost', 27017)
db = client.cache

rs = {}
# Minimal request headers; only a User-agent is required by the target site.
headers = {'User-agent': 'wzwp'}

if __name__ == '__main__':
    # For every category document in db.tiabamulu, crawl all paginated forum
    # listing pages and upsert each forum's metadata into db.tiabalist.
    #
    # NOTE(review): a cursor opened with no_cursor_timeout=True is NOT closed
    # automatically by the server, so it must be closed explicitly — hence the
    # try/finally below (the original leaked it).
    cursor = db.tiabamulu.find(no_cursor_timeout=True)
    try:
        for category in cursor:
            # category['_id'] holds the category page path, e.g. '/f/fdir?fd=...'
            urldetail = baseurl + category['_id']
            htmldetail = download(urldetail, headers=headers, proxy=None, num_retries=-1)
            print(urldetail)

            # Read the "last page" link (class 'last') to learn the page count.
            soupdetail = BeautifulSoup(htmldetail, 'html.parser')
            last_links = soupdetail.find_all(attrs={'class': 'last'})
            if last_links:
                parsed = urllib.parse.urlparse(last_links[0].attrs['href'])
                params = urllib.parse.parse_qs(parsed.query, True)
                lastpn = int(params['pn'][0])
            else:
                # No pagination link means the category has a single page.
                lastpn = 1

            for pn in range(1, lastpn + 1):
                time.sleep(1)  # be polite: at most one request per second
                url = urldetail + '&pn=' + str(pn)
                print(pn)

                page_html = download(url, headers=headers, proxy=None, num_retries=3)
                page_soup = BeautifulSoup(page_html, 'html.parser')

                # Each forum entry is an <a class="ba_href clearfix"> whose
                # children hold image, name, member count, post count and
                # description (positional .contents access — fragile, depends
                # on the page layout; TODO confirm against current markup).
                for entry in page_soup.find_all(attrs={'class': 'ba_href clearfix'}):
                    ba_href = entry.attrs['href']
                    ba_img = entry.next.attrs['src']
                    divcontent = entry.next.next
                    ba_name = divcontent.contents[0].text
                    ba_mnum = int(divcontent.contents[1].contents[0].text)
                    ba_pnum = int(divcontent.contents[1].contents[1].text)
                    ba_desc = divcontent.contents[2].text
                    # update_one replaces the deprecated Collection.update
                    # (removed in PyMongo 4); upsert=True keeps the original
                    # insert-or-update semantics.
                    db.tiabalist.update_one(
                        {'_id': ba_href},
                        {'$set': {'name': ba_name, 'mnum': ba_mnum,
                                  'pnum': ba_pnum, 'desc': ba_desc,
                                  'img': ba_img, 'type': category['name'],
                                  'status': 0}},
                        upsert=True)
    finally:
        cursor.close()  # mandatory for no_cursor_timeout cursors
    print("Complete!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")



    #http://tieba.baidu.com/f/fdir?fd=%C6%E4%CB%FB&sd=%C6%E4%CB%FB&pn=40