#encoding=utf-8

from DownSource import * 
from ExtractList import *
from ExtractInfo import *
from StoreRecord import *
import sys

class Crawler :
    """Drives a crawl: fetches each list page of a Site, extracts the
    entries and appends them to a record file.

    Fix vs. original: the Python-2-only ``print`` statement is written in
    its single-argument parenthesized form, which prints identically under
    Python 2 and is valid Python 3; dead commented-out code is removed.
    """

    def __init__(self, site, page, fileName):
        # site:     a Site instance (provides getListUrl / homeUrl / attrs)
        # page:     number of list pages to fetch (0 .. page-1)
        # fileName: path of the output record file
        self.site = site
        self.page = page
        self.fileName = fileName

    def __str__(self):
        return "I'm the crawler for : \"" + self.site.homeUrl + "\" And will record in " + self.fileName

    def process(self):
        """Fetch every list page, extract its entries and append them to
        self.fileName, reporting each finished URL."""
        for i in range(0, self.page):
            tmp_url = self.site.getListUrl(i)
            ds = DownSource(tmp_url)
            src = ds.get()

            el = ExtractList()
            dictt = el.get(self.site, src)

            sr = StoreRecord()
            sr.toFile(dictt, self.fileName)
            # Parenthesized single-arg print behaves the same on py2/py3.
            print("Done : " + tmp_url)

class Site :
    """Describes one list-page site.

    listUrl is a URL template where '*' marks the page slot; attrs is the
    BeautifulSoup attribute filter that selects list entries; withFactor
    says whether the slot takes an item offset (pageNum * items-per-page)
    or a plain page number.  soupHtm is stored as-is for downstream use.

    Fixes vs. original: ``== True`` comparison replaced by a truth test,
    Python-2-only ``print`` statement parenthesized, regex written as a
    raw string.  Behavior is otherwise unchanged.
    """

    def __init__(self, listUrl, soupHtm, attrs, withFactor):
        self.listUrl = listUrl
        self.soupHtm = soupHtm
        self.attrs = attrs
        self.withFactor = withFactor

        # Probe page 0 to learn how many entries one list page holds.
        # DownSource.get() signals failure by returning the literal
        # string "Exception".
        ds = DownSource(listUrl.replace('*', '0'))
        src = ds.get()
        if src == "Exception" :
            print("Site.__init__() failed!")
            sys.exit()
        soup = BeautifulSoup(''.join(src))
        soup = soup.findAll(attrs = attrs)  # keyword 'attrs' takes the local dict
        self.count = len(soup)

        # group(0) is 'scheme://host/' (used as homeUrl),
        # group(1) is the bare host (used as the site name).
        p = re.compile(r'[^(://)]*?://(.*?)/')
        m = p.match(listUrl)
        self.homeUrl = m.group(0)
        self.name = m.group(1)

        # A real list page is expected to hold several entries; fewer
        # than 3 means the attrs filter (or the download) is wrong.
        if self.count < 3 :
            raise ValueError("A list page contains items less than 3 ? How is it possible ? ( Now it's "+ str(self.count) +" )")

    def getListUrl(self, pageNum):
        """Return the concrete URL of list page ``pageNum``."""
        if self.withFactor :
            # '*' is an item offset: page number times items-per-page.
            return self.listUrl.replace('*', str(pageNum * self.count))
        else :
            return self.listUrl.replace('*', str(pageNum))


if __name__ == '__main__':
    # Active configuration.  Previously every configuration below was
    # commented out, so the script always crashed with NameError at the
    # Site(...) call; the first one is now enabled by default.  Swap in
    # any of the alternatives kept below for reference.
    listUrl = "http://jingyan.baidu.com/goodExpList/0?pn=*"
    soupHtm = ""
    attrs = {'class' : 'f14 fbold'}
    withFactor = True
    page = 2

#    listUrl = "http://zhidao.baidu.com/browse/1063?lm=2&word=&pn=*"
#    soupHtm = ""
#    attrs = {'class' : 'title'}
#    withFactor = True
#    page = 2

#    listUrl = "http://zhidao.baidu.com/browse/182?lm=8704&word=%D6%D8%C7%EC&pn=*"
#    soupHtm = ""
#    attrs = {'class' : 'title'}
#    withFactor = True
#    page = 1

#    listUrl = "http://www.alibuybuy.com/category/it/page/*"
#    soupHtm = ".h3.a"
#    attrs = {'class' : 'postentry'}
#    withFactor = False
#    page = 2

#    listUrl = "https://www.androidpit.com/en/android/apps/shop/BOOKS_AND_REFERENCE?si=*"
#    soupHtm = ""
#    attrs = {'class' : 'appname'}
#    withFactor = True
#    page = 3

#    listUrl = "http://www.androidpit.com/en/android/market/apps/list/1?si=*"
#    soupHtm = ""
#    attrs = {'class' : 'appname'}
#    withFactor = True
#    page = 3

#    listUrl = "http://www.baidu.com/s?wd=%BF%AA%C6%FB%B3%B5%B8%C4%D7%B0%B5%EA&pn=*"
#    soupHtm = ".a"
#    attrs = {'class' : 't'}
#    withFactor = True
#    page = 3

    # Build the Site (probes page 0 over the network), then crawl
    # 'page' list pages into C:/<site-name>.txt.
    site = Site(listUrl, soupHtm, attrs, withFactor)
    crawler = Crawler(site, page, "C:/"+site.name+".txt")
    print(crawler)
    crawler.process()
    print("over")