#coding=utf-8

import codecs
import datetime
import httplib
import os
import re
import sys
import urllib
import urlparse

import chilkat
from BeautifulSoup import BeautifulSoup
#  The Chilkat Spider component/library is free.
#  The Chilkat Spider component/library is free.
spider = chilkat.CkSpider()       # crawls pages from the current seed URL
subspider = chilkat.CkSpider()    # collects the URLs the main spider visits

#  String sets: put_Unique(True) makes Append() ignore duplicates.
seenDomains = chilkat.CkStringArray()   # base domains already crawled
seedUrls = chilkat.CkStringArray()      # crawl queue

seenDomains.put_Unique(True)
seedUrls.put_Unique(True)

#  Hour-stamped name for the (currently disabled) page-cache directory.
#  strftime zero-pads every field, so e.g. 2009-07-05 13h -> "2009070513";
#  the previous '%d%d%d%d' format produced ambiguous, unsortable names
#  (month/day/hour ran together without padding).
nowtime = datetime.datetime.now()
newdirname = nowtime.strftime('%Y%m%d%H')

seedUrls.Append("http://www.china-cbn.com/")

#  Optional crawl filters (disabled examples):
#spider.AddMustMatchPattern("*redbaby.com.cn")
#spider.AddAvoidOutboundLinkPattern("*.tenpay.*")

#  Optional on-disk cache so previously fetched URLs are not re-fetched
#  (disabled; newdirname above names the per-hour cache directory):
#cachedir = "c:/spiderCache/xinhuanet/" + newdirname
#spider.put_CacheDir(cachedir)
#spider.put_FetchFromCache(True)
#spider.put_UpdateCache(True)

subspider.Initialize("http://www.china-cbn.com/")
#httplib.HTTPConnection.debuglevel = 1

#  Matches hrefs embedding a YYYYMMDD date; groups = (year, month, day).
hrefPattern = re.compile(r'(\d{4})(\d{2})(\d{2})\D*')
#  Strips HTML tags from prettified markup, skipping over quoted
#  attribute values so a '>' inside quotes does not end the tag early.
p = re.compile("<(\"[^\"]*\"|'[^']*'|[^'\">])*>")
p = re.compile("<(\"[^\"]*\"|'[^']*'|[^'\">])*>");
while True:
    if(seedUrls.get_Count()==0):
        break;

    url = seedUrls.pop()
    spider.Initialize(url)
    print url;
    #print "================================================";

    #  Spider 5 URLs of this domain.
    #  but first, save the base domain in seenDomains
    domain = spider.getDomain(url)
    seenDomains.Append(spider.getBaseDomain(domain))
    #loopcount = spider.get_NumUnspidered();

    for i in range(0,1):
        success = spider.CrawlNext();
        
       # nowtime= datetime.datetime.now();
       # newfilename='%d%d%d%d%d%d%d'%(nowtime.year,nowtime.month,
       #                               nowtime.day,nowtime.hour,
       #                               nowtime.minute,nowtime.second,
       #                               nowtime.microsecond);
       # newfilename=newfilename+'.html';
        #seedUrls.Append(spider.lastUrl());
        lastedHtml = spider.lastHtml();
        if(success==True):
            subspider.AddUnspidered(spider.lastUrl());

       # try:
       #     fout = open(os.path.join(UPLOAD_DIR,newfilename), 'wb');
       #     fout.write(lastedHtml);
       #     fout.flush();
       # except IOError:
       #     print "ioerror";
       # finally:
       #     fout.close();
            
        soup = BeautifulSoup(lastedHtml);
        #print soup.originalEncoding;

        ss = soup.findAll(name='table',attrs={"id":"Table10"})
        worldnewssoup = soup.findAll(name='table',attrs={"id":"Table13"})
        #print ss;

        for worldnews in worldnewssoup:
            todaylinks = BeautifulSoup(worldnews.prettify());
            hreflinks = todaylinks.findAll(name="a");
            for href in hreflinks:
                todayhref = href['href'];
                hrefcontent = href.next;
                hrefSearch = hrefPattern.search(todayhref);
                #if(hrefSearch!=None):
                #    print hrefSearch.groups()[0],hrefSearch.groups()[1],hrefSearch.groups()[2]

                #print url+todayhref;
                linkurl = "";
                doc = None;    
                if(todayhref.startswith("http://")):
                    linkurl = todayhref;
                else:    
                    linkurl = url+todayhref;
                doc = urllib.urlopen(linkurl);
                print linkurl;
                contentdiv = BeautifulSoup(doc.read());
                #print contentdiv.originalEncoding;
                #print contentdiv.prettify();
                content = contentdiv.find(name='td',attrs={"style":"font-size:14px;padding:5"});

                #print href['href'],href.next;
                #if(contentdiv.originalEncoding=='windows-1252'):
                #    print unicode(p.sub('',content.prettify()),'1252');
                #print p.sub('',content.prettify());
                #print "=============================";
            

        for s in ss:
            todaylinks = BeautifulSoup(s.prettify());
            hreflinks = todaylinks.findAll(name="a");
            for href in hreflinks:
                todayhref = href['href'];
                hrefcontent = href.next;
                hrefSearch = hrefPattern.search(todayhref);
                #if(hrefSearch!=None):
                #    print hrefSearch.groups()[0],hrefSearch.groups()[1],hrefSearch.groups()[2]

                #print url+todayhref;
                linkurl = "";
                doc = None;    
                if(todayhref.startswith("http://")):
                    linkurl = todayhref;
                else:    
                    linkurl = url+todayhref;
                doc = urllib.urlopen(linkurl);
                #print linkurl;
                contentdiv = BeautifulSoup(doc.read());
                #print contentdiv.prettify();
                content = contentdiv.find(name='td',attrs={"style":"font-size:14px;padding:5"});

                #print href['href'],href.next;
                savecontent= p.sub('',content.prettify());
            
        if (success != True):
            print 'error!'
            break;

        #  Display the URL we just crawled.
        #print spider.lastUrl()

        #  If the last URL was retrieved from cache,
        #  we won't wait.  Otherwise we'll wait 1 second
        #  before fetching the next URL.
        if (spider.get_LastFromCache() != True):
            spider.SleepMs(1000)
            
        #  Add the outbound links to seedUrls, except
        #  for the domains we've already seen.
       # for i in range(0,spider.get_NumOutboundLinks()):
         #   url = spider.getOutboundLink(i)
        #    domain = spider.getDomain(url)
         #   baseDomain = spider.getBaseDomain(domain)
         #   if (not seenDomains.Contains(baseDomain)):
         #       seedUrls.Append(url)

            #  Don't let our list of seedUrls grow too large.
          #  if (seedUrls.get_Count() > 1000):
         #       break

#print subspider.getUnspideredUrl();         
