#coding=utf-8
import url_manager2
import html_downloader
import html_parser
import html_output
import math
import mysql_oper
import traceback






class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager2.UrlManager2()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_output.HtmlOutput()
        self.mysqloper = mysql_oper.MysqlOper()
    
    def craw(self, root_url):
        count =  1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                tmp_url = self.urls.get_new_url()
                print 'craw %d'% count 
                #print 'craw %s'% new_url
                new_url = '%s'% tmp_url
                html_cont = self.downloader.download(new_url + '/home')
                new_data = self.parser.parse(new_url,html_cont)                
                self.outputer.collect_data(new_data)
                
                count_data = self.parser.parseCount(html_cont)
                fans_count = count_data['fans_count']
                fellow_count = count_data['fellow_count']
                
                fans_page_cnt = int(math.ceil(float(fans_count)/20)) + 1
                for i in range(1,fans_page_cnt):
                   
                    print 'fans_page_cnt %d,%d'%(i,fans_page_cnt)
                    html_cont_fans = self.downloader.download(new_url+'/fans?p='+str(i))
                    new_fans_urls = self.parser.parseUrl(html_cont_fans)
                    self.urls.add_new_urls(new_fans_urls)
                
                fellow_page_cnt = int(math.ceil(float(fellow_count)/20)) + 1
                for j in range(1,fellow_page_cnt):
                    print 'fellow_page_cnt %d,%d'%(j,fellow_page_cnt)
                    html_cont_fellow = self.downloader.download(new_url+'/fellow?p='+str(j))
                    new_fellow_urls = self.parser.parseUrl(html_cont_fellow)
                    self.urls.add_new_urls(new_fellow_urls)
                
                #if count == 4:
                #    break
                count = count + 1
            except Exception:
                print 'traceback.format_exc():\n%s' % traceback.format_exc()
                continue
        self.outputer.output_html()

if __name__ == "__main__":
    # Seed profile from which the breadth-first crawl starts.
    SpiderMain().craw("https://my.oschina.net/OQKuDOtsbYT2")