#coding=utf-8

import codecs
import datetime
import os
import re
import sys
import urllib

import chilkat
from BeautifulSoup import BeautifulSoup
#  The Chilkat Spider component/library is free.
spider = chilkat.CkSpider()
subspider = chilkat.CkSpider()

# Base domains already seeded, and the queue of URLs still to crawl.
seenDomains = chilkat.CkStringArray()
seedUrls = chilkat.CkStringArray()

# Reject duplicate entries on Append() for both arrays.
seenDomains.put_Unique(True)
seedUrls.put_Unique(True)

# Seed the crawl with the xinhuanet sports index page.
seedUrls.Append("http://www.xinhuanet.com/sports/")

# Global constants.
# NOTE(review): both are left over from an earlier redbaby scraper and are
# only referenced by disabled code paths — confirm before removing.
UPLOAD_DIR = "d://tmp//redbaby//"
PATTERN = "^(http://img.redbaby.com.cn/UpFile/Images/Product)"

# Avoid-pattern: never follow outbound links into the payment gateway.
spider.AddAvoidOutboundLinkPattern("*.tenpay.*")

#  Use a cache so we don't have to re-fetch URLs previously fetched.
#  BUG FIX: the original built the directory name with '%d%d%d%d', which is
#  ambiguous without zero padding (e.g. 2011-1-12 and 2011-11-2 both yield
#  "2011112..."); it also computed nowtime/newdirname twice, with the first
#  pair immediately overwritten. Computed once here, zero-padded.
nowtime = datetime.datetime.now()
newdirname = '%04d%02d%02d%02d' % (nowtime.year, nowtime.month,
                                   nowtime.day, nowtime.hour)
cachedir = "c:/spiderCache/xinhuanet/" + newdirname

#spider.put_CacheDir(cachedir)
#spider.put_FetchFromCache(True)
#spider.put_UpdateCache(True)

# Second-level spider: collects the pages found by the main crawl.
subspider.Initialize("http://www.xinhuanet.com/sports/")

imagecount = 0;
hrefPattern = re.compile(r'(\d{4})\D*(\d{2})\D*(\d{2})\D*');
while True:
    if(seedUrls.get_Count()==0):
        break;

    url = seedUrls.pop()
    spider.Initialize(url)
    #print url;
    #print "================================================";

    #  Spider 5 URLs of this domain.
    #  but first, save the base domain in seenDomains
    domain = spider.getDomain(url)
    seenDomains.Append(spider.getBaseDomain(domain))
    #loopcount = spider.get_NumUnspidered();

    for i in range(0,5):
        success = spider.CrawlNext();
        
        nowtime= datetime.datetime.now();
       # newfilename='%d%d%d%d%d%d%d'%(nowtime.year,nowtime.month,
       #                               nowtime.day,nowtime.hour,
       #                               nowtime.minute,nowtime.second,
       #                               nowtime.microsecond);
       # newfilename=newfilename+'.html';
        #seedUrls.Append(spider.lastUrl());
        lastedHtml = spider.lastHtml();
        if(success==True):
            subspider.AddUnspidered(spider.lastUrl());

       # try:
       #     fout = open(os.path.join(UPLOAD_DIR,newfilename), 'wb');
       #     fout.write(lastedHtml);
       #     fout.flush();
       # except IOError:
       #     print "ioerror";
       # finally:
       #     fout.close();
            
        soup = BeautifulSoup(lastedHtml);
        #print soup.originalEncoding;

        ss = soup.findAll(name='td',attrs={"height":"23"})

        for s in ss:
            #print s;
            div = BeautifulSoup(s.prettify());
            namediv= div.find(name='a',attrs={"class":"lan14"});
            if(namediv!=None):
                #print namediv["href"];
                import urllib;
                href = namediv["href"];
                doc = urllib.urlopen(href);
                contentdiv = BeautifulSoup(doc.read());
                hrefSearch = hrefPattern.search(href);
                if(hrefSearch!=None):
                    print hrefSearch.groups()[0],hrefSearch.groups()[1],hrefSearch.groups()[2]
                
                print href,hrefPattern.search(href).groups();
                content = contentdiv.find(name="div",attrs={"id":"Content"});

                #if(content!=None):
                #print content;
                
                #print '==========================\r\n';
                titlediv = namediv.findAll(name='b');
                ###xinhuanet
                #for i in range(len(titlediv)):
                #    print i,titlediv[i],href;
                #print titlediv[1].next;
         #   pricediv= div.find(name='div',attrs={"class":"singleprice"});
         #   imagediv= (div.find(name='div',attrs={"class":"singletu"})).find(name='img')["src"];
         #   marketprice = pricediv.find(name='strike');
         #   redbabyprice = pricediv.find(name='b')
         #   print redbabyprice.next;
            #print namediv;
            #if namediv!=None:
            #    print namediv.next;
         #   print namediv.find(name='a').next;
        #for s in ss:
         #   imagename= BeautifulSoup(s.prettify()).img["src"];

          #  if re.match(PATTERN,imagename):
           #     import urllib;
            #    imgserver = urllib.urlopen(imagename);
             #   try:
              #      newimagename = '%d%d%d%d%d%d%d%d'%(nowtime.year,nowtime.month,
               #                                        nowtime.day,nowtime.hour,
                #                                       nowtime.minute,nowtime.second,
                 #                                      nowtime.microsecond,imagecount);
                  #  newimagename = newimagename+imagename[imagename.rindex('.'):];
                    
                   # fimgeout = open(os.path.join(UPLOAD_DIR,newimagename), 'wb');
                    #fimgeout.write(imgserver.read());
                    #fimgeout.flush();
                    
                    #print imagename;
                    #print newimagename;
                    #print 'save image!';
                    
                    #imagecount=imagecount+1;
                    #print 'saveimage successful!'
                    #print imagecount;
                #except IOError:
                #    print "ioerror";
                #finally:
                #    fimgeout.close();
                    
                #print imagename
            
            
        if (success != True):
            print 'error!'
            break;

        #  Display the URL we just crawled.
        #print spider.lastUrl()

        #  If the last URL was retrieved from cache,
        #  we won't wait.  Otherwise we'll wait 1 second
        #  before fetching the next URL.
        if (spider.get_LastFromCache() != True):
            spider.SleepMs(1000)
            
        #  Add the outbound links to seedUrls, except
        #  for the domains we've already seen.
       # for i in range(0,spider.get_NumOutboundLinks()):
         #   url = spider.getOutboundLink(i)
        #    domain = spider.getDomain(url)
         #   baseDomain = spider.getBaseDomain(domain)
         #   if (not seenDomains.Contains(baseDomain)):
         #       seedUrls.Append(url)

            #  Don't let our list of seedUrls grow too large.
          #  if (seedUrls.get_Count() > 1000):
         #       break

#print subspider.getUnspideredUrl();         
