#coding=utf-8
#import chilkat
import re
from BeautifulSoup import BeautifulSoup 
import datetime
import os, sys
import codecs
import urllib

# Timestamp captured once at import; used to build a per-run directory name.
nowtime = datetime.datetime.now()
# Zero-padded yyyymmddHH so names are fixed-width, sortable and unambiguous.
# (The old '%d%d%d%d' form collided: '2024111' could mean Nov 1 or Jan 11.)
newdirname = '%04d%02d%02d%02d' % (nowtime.year, nowtime.month, nowtime.day, nowtime.hour)
# Working directory at startup; presumably the base for output files — TODO confirm.
currentdir = os.getcwd()

def crawIfensi():
    hrefPattern = re.compile(r'(\d{4})\D*(\d{2})\D*(\d{2})\D*');
    #spider = chilkat.CkSpider()
    url = "http://www.ifensi.com/";
    #spider.Initialize(url)

    #  Spider 5 URLs of this domain.
    #  but first, save the base domain in seenDomains
    for i in range(0,1):
        #success = spider.CrawlNext();
        
       # nowtime= datetime.datetime.now();
       # newfilename='%d%d%d%d%d%d%d'%(nowtime.year,nowtime.month,
       #                               nowtime.day,nowtime.hour,
       #                               nowtime.minute,nowtime.second,
       #                               nowtime.microsecond);
       # newfilename=newfilename+'.html';
        #seedUrls.Append(spider.lastUrl());
        lastedHtml = unicode(urllib.urlopen(url).read(),'gbk');
       # try:
       #     fout = open(os.path.join(UPLOAD_DIR,newfilename), 'wb');
       #     fout.write(lastedHtml);
       #     fout.flush();
       # except IOError:
       #     print "ioerror";
       # finally:
       #     fout.close();
            
        soup = BeautifulSoup(lastedHtml);
        #print soup.originalEncoding;

        hots = soup.findAll(name='div',attrs={"id":"index_jiaodian"})
        for hot in hots:
            print hot.h1.a.next;
            hotsoup = BeautifulSoup(hot.prettify());
            hothrefs =  hotsoup.findAll(name="li");
            for hothref in hothrefs:
                hrefs = BeautifulSoup(hothref.prettify());
                links = hrefs.findAll(name="a");
                for link in links:
                    if(not link['href'].startswith("http://news")):
                        continue;
                    
                    hrefSearch = hrefPattern.search(link['href']);
                    #print hrefSearch;
                    if(hrefSearch!=None):
                        print hrefSearch.groups()[0],hrefSearch.groups()[1],hrefSearch.groups()[2]
                        
                    doc = urllib.urlopen(link['href']);
                    contenthref = BeautifulSoup(doc.read());
                    contentdivs = contenthref.findAll(name="div",attrs={"class":"main2"});
                    for contentdiv in contentdivs:
                        content = BeautifulSoup(contentdiv.prettify());
                        title = content.find(name="div",attrs={"id":"title"});
                        print title.h1.next;
                        newscontent = content.find(name="div",attrs={"id":"content"});
                        images = BeautifulSoup(newscontent.prettify());

                        imagearray = images.findAll(name="img");
                        for image in imagearray:
                            print image['src'];
                    #print link['href'],link.next;

        buguas = soup.findAll(name='div',attrs={"id":"index_bagua"})
        for bagua in buguas:
            baguasoup = BeautifulSoup(bagua.prettify());
            baguahrefs =  baguasoup.findAll(name="li");
            for baguahref in baguahrefs:
                hrefs = BeautifulSoup(baguahref.prettify());
                links = hrefs.findAll(name="a");
                for link in links:
                    print link['href'];
                    doc = urllib.urlopen(link['href']);
                    contenthref = BeautifulSoup(doc.read());
                    contentdivs = contenthref.findAll(name="div",attrs={"class":"main2"});
                    for contentdiv in contentdivs:
                        content = BeautifulSoup(contentdiv.prettify());
                        title = content.find(name="div",attrs={"id":"title"});
                        print title.h1.next;
                        newscontent = content.find(name="div",attrs={"id":"content"});
                   
            
        #if (success != True):
        #    print 'error!'
        #    break;

        
        #if (spider.get_LastFromCache() != True):
        #    spider.SleepMs(1000)

def crawChinaCbn():
    #chinacbnspider = chilkat.CkSpider();
    cbnurl = "http://www.china-cbn.com/";
    #chinacbnspider.Initialize(cbnurl);
    cbnhrefPattern = re.compile(r'(\d{4})(\d{2})(\d{2})\D*');
    cbnp = re.compile("<(\"[^\"]*\"|'[^']*'|[^'\">])*>");
    for i in range(0,1):
        #success = chinacbnspider.CrawlNext();
        lastedHtml = unicode(urllib.urlopen(cbnurl).read(),"gbk");
            
        soup = BeautifulSoup(lastedHtml);

        ss = soup.findAll(name='table',attrs={"id":"Table10"})
        worldnewssoup = soup.findAll(name='table',attrs={"id":"Table13"})

        for worldnews in worldnewssoup:
            todaylinks = BeautifulSoup(worldnews.prettify());
            hreflinks = todaylinks.findAll(name="a");
            for href in hreflinks:
                todayhref = href['href'];
                hrefcontent = href.next;
                hrefSearch = cbnhrefPattern.search(todayhref);
                #if(hrefSearch!=None):
                #    print hrefSearch.groups()[0],hrefSearch.groups()[1],hrefSearch.groups()[2]

                #print url+todayhref;
                linkurl = "";
                doc = None;    
                if(todayhref.startswith("http://")):
                    linkurl = todayhref;
                else:    
                    linkurl = cbnurl+todayhref;
                    
                doc = urllib.urlopen(linkurl);
                print linkurl;
                contentdiv = BeautifulSoup(unicode(doc.read(),'gbk'));
                #print contentdiv.originalEncoding;
                #print contentdiv.prettify();
                content = contentdiv.find(name='td',attrs={"style":"font-size:14px;padding:5"});

                print href['href'],href.next;
                #if(contentdiv.originalEncoding=='windows-1252'):
                #    print unicode(p.sub('',content.prettify()),'1252');
                #print p.sub('',content.prettify());
                #print "=============================";
            

        for s in ss:
            todaylinks = BeautifulSoup(s.prettify());
            hreflinks = todaylinks.findAll(name="a");
            for href in hreflinks:
                todayhref = href['href'];
                hrefcontent = href.next;
                hrefSearch = cbnhrefPattern.search(todayhref);
                #if(hrefSearch!=None):
                #    print hrefSearch.groups()[0],hrefSearch.groups()[1],hrefSearch.groups()[2]

                #print url+todayhref;
                linkurl = "";
                doc = None;    
                if(todayhref.startswith("http://")):
                    linkurl = todayhref;
                else:    
                    linkurl = cbnurl+todayhref;
                doc = urllib.urlopen(linkurl);
                #print linkurl;
                contentdiv = BeautifulSoup(unicode(doc.read(),"gbk"));
                #print contentdiv.prettify();
                content = contentdiv.find(name='td',attrs={"style":"font-size:14px;padding:5"});

                #print href['href'],href.next;
                savecontent= cbnp.sub('',content.prettify());
            
        #if (success != True):
        #    print 'error!'
        #    break;
        
def crawXinhuaSport():
    xinhuahrefPattern = re.compile(r'(\d{4})\D*(\d{2})\D*(\d{2})\D*');
    #xinhuasportspider = chinacbnspider = chilkat.CkSpider();
    #xinhuasportspider.Initialize("http://www.xinhuanet.com/sports/");
    for i in range(0,1):
        #success = xinhuasportspider.CrawlNext();
        
        nowtime= datetime.datetime.now();
        #lastedHtml = xinhuasportspider.lastHtml();
        lastedHtml = unicode(urllib.urlopen("http://www.xinhuanet.com/sports/").read(),"gbk");

        soup = BeautifulSoup(lastedHtml);
        #print soup.originalEncoding;

        ss = soup.findAll(name='td',attrs={"height":"23"})

        for s in ss:
            div = BeautifulSoup(s.prettify());
            namediv= div.find(name='a',attrs={"class":"lan14"});
            if(namediv!=None):
                href = namediv["href"];
                print div("b")[1];
                #print namediv,"\r\n,title",namediv.span.next;
                doc = urllib.urlopen(href);
                contentdiv = BeautifulSoup(unicode(doc.read(),"gbk"));
                hrefSearch = xinhuahrefPattern.search(href);
                #if(hrefSearch!=None):
                #    print hrefSearch.groups()[0],hrefSearch.groups()[1],hrefSearch.groups()[2]
                
                #print href,xinhuahrefPattern.search(href).groups();
                content = contentdiv.find(name="div",attrs={"id":"Content"});

                if(content!=None):
                    print content.string;
                
                #print '==========================\r\n';
                titlediv = namediv.findAll(name='b');
            
        #if (success != True):
        #    print 'error!'
        #    break;

# Optional: re-wrap stdout for GBK consoles before running a crawler.
#sys.stdout = codecs.lookup('gbk')[-1](sys.stdout)

# Guard the entry point so importing this module does not trigger a crawl.
if __name__ == "__main__":
    #crawIfensi()
    #crawChinaCbn()
    crawXinhuaSport()

