#code:utf-8
import datetime
import time
import csv
from crawler import downloadnew
from bs4 import BeautifulSoup
import json
from pymongo import MongoClient
import urllib.parse
import re
import sys   
from MongoTiebaListQueue import MongoTiebaListQueue
import threading
import math
import requests
import io,sys
sys.setrecursionlimit(1000000) # raise the recursion limit (set to one million here)
# Member-list page template: {tiebaname} is the URL-quoted forum name,
# {pagenumber} the 1-based page index.
baseurl='http://tieba.baidu.com/bawu2/platform/listMemberInfo?word={tiebaname}&ie=utf-8&pn={pagenumber}'
#baseurl='http://tieba.baidu.com'


# Module-level Mongo connection; collections used: cache.tiebauser_tieba.
client=MongoClient('localhost',27017)
db=client.cache

# Desktop Chrome UA so tieba.baidu.com serves the normal HTML pages.
user_agent ='Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.22 Safari/537.36 SE 2.X MetaSr 1.0'
headers = {'User-Agent':user_agent}



def threaded_crawler(num_thread):
    """Drain the Mongo-backed tieba queue with up to *num_thread* workers.

    Each worker pops (id, name) entries from ``MongoTiebaListQueue`` and
    crawls the member list via ``tiebauserlist``; entries are marked
    complete on success and uncomplete on failure.

    Blocks until the queue is empty and all workers have exited.
    """
    queue = MongoTiebaListQueue()

    def process_queue():
        # Worker loop: run until queue.pop() signals exhaustion.
        while True:
            try:
                tiebainfo = queue.pop()
            except KeyError:
                # queue.pop raises KeyError when nothing is pending — exit worker.
                break
            else:
                # tiebainfo[0] is the queue key, tiebainfo[1] the tieba name.
                if tiebauserlist(tiebainfo[1]):
                    queue.complete(tiebainfo[0])
                else:
                    queue.uncomplete(tiebainfo[0])

    threads = []
    while threads or queue:
        # Rebuild the list instead of calling threads.remove() while
        # iterating it — the original skipped the element after each
        # removal, leaking dead thread references.
        threads = [t for t in threads if t.is_alive()]
        # Top up the pool while work is still visible in the queue.
        while len(threads) < num_thread and queue.peek():
            thread = threading.Thread(target=process_queue)
            # Daemon threads don't block interpreter shutdown.
            # (setDaemon() is deprecated; assign the attribute instead.)
            thread.daemon = True
            thread.start()
            threads.append(thread)

        time.sleep(1)



def tiebauserlist(tiebaname):
    """Crawl every member-list page of a tieba and store members in Mongo.

    For each member found, inserts {'tiebaname', 'user', 'level'} into
    ``db.tiebauser_tieba``.

    Returns:
        True when the page count was read and all pages were processed,
        False when the first page could not be fetched or parsed.
        (The original fell through returning None, so the caller
        ``threaded_crawler`` marked every entry uncomplete.)
    """
    # Drop the trailing character before quoting — assumes the name ends
    # with the '吧' suffix; TODO confirm this holds for all callers.
    name = urllib.parse.quote(tiebaname[:-1])
    url = baseurl.format(tiebaname=name, pagenumber=1)
    # Pass the module-level User-Agent headers; the plain requests.get
    # previously dropped them even though they were built for this purpose.
    html = requests.get(url, headers=headers).text
    if not html:
        return False
    soup = BeautifulSoup(html, 'html.parser')
    pageinfo = soup.find_all(attrs={'class': 'tbui_total_page'})
    if not pageinfo:
        # Unexpected page layout (e.g. blocked or empty forum) — fail
        # gracefully instead of raising IndexError.
        return False
    # Pager text looks like '共12页' — strip the surrounding characters.
    pagestr = str(pageinfo[0].text).rstrip('页').lstrip('共')
    pagenum = int(pagestr)
    for p in range(1, pagenum + 1):
        url = baseurl.format(tiebaname=name, pagenumber=p)
        html = requests.get(url, headers=headers).text
        print(p)
        if not html:
            continue
        soup = BeautifulSoup(html, 'html.parser')
        for li in soup.find_all(attrs={'class': 'user_name'}):
            username = li.text
            if not username:
                continue
            # Second CSS class looks like 'bawu-info-lv12'; extract the
            # trailing digits explicitly. The original used
            # .lstrip('bawu-info-lv'), which strips a character SET and
            # only worked by accident.
            levelcls = li.next.next_element.attrs['class'][1]
            match = re.search(r'(\d+)$', levelcls)
            if match is None:
                continue
            userlevel = int(match.group(1))
            # NOTE(review): insert() is deprecated in pymongo>=3
            # (insert_one); kept as-is since the installed version is
            # unknown.
            db.tiebauser_tieba.insert({'tiebaname': tiebaname,
                                       'user': username,
                                       'level': userlevel})
    return True


if __name__ == '__main__':
    # Entry point: crawl one forum's member list directly.
    # Alternative: threaded_crawler(10) drains the Mongo-backed queue
    # with 10 worker threads instead of a single hard-coded forum.
    #threaded_crawler(10)
    tiebauserlist('新宋吧')
    # --- Dead code kept for reference: CSV export of db.tiabalist ---
    # with open("tiebalist.csv","w",newline="",encoding="utf-8") as csvfile:
    #     writer = csv.writer(csvfile,dialect = ("excel"))
    #     writer.writerow(['名称', '类型', '关注','帖子'])
    #     for i in db.tiabalist.find():
    #         data=[i['name'].replace(",",'-').replace("，",'-'),i['type'],i['mnum'],i['pnum']]
    #         writer.writerow(data)

    #     csvfile.close()

        #tiebauserlist(i['_id'],i['name'])
    # --- Dead code: seed db.tiebauserurl with per-page URLs ---
    #for i in db.tiabalist.find({'status':1}):
    #     name = urllib.parse.quote((i['name'])[:-1])
    #     print('%s,%s'%(datetime.datetime.now(),name))
    #     pagenum=math.ceil(i['mnum']/24)
    #     for p in range(1,pagenum+1):
    #         url=baseurl.format(tiebaname=name,pagenumber=p)
    #         #print('%s,%s'%(url,name))
    #         db.tiebauserurl.update({'_id':url},{'$set':{'tieba':i['name'],'status':0}},upsert=True)

    #     db.tiabalist.update({'_id':i['_id']},{'$set':{'status':1}})
    # --- Dead code: reset proxy statuses ---
    # for i in db.proxy.find():
    #     db.proxy.update({'_id':i['_id']},{'$set':{'status':0}})
