#coding=utf-8
#import chilkat
import re
from BeautifulSoup import BeautifulSoup 
import datetime
import os, sys
import codecs
import urllib
import psycopg2
import chardet
from psycopg2 import ProgrammingError

# --- Configuration ----------------------------------------------------------
# NOTE(review): this semicolon-separated DSN is not the "dbname=... user=..."
# keyword form psycopg2 accepts, and it is never referenced below (the
# connect() call at the bottom of the file builds its own string) -- looks
# like dead config; confirm before relying on it.
DSN = 'dbname=postgres;username=postgres;password=041209'
# Front-page URLs of the three sites this script can crawl.
CRAW_IFENSI_URL="http://www.ifensi.com/";
CRAW_XINHUA_URL="http://www.xinhuanet.com/sports/";
CRAW_CHINACBD_URL="http://www.china-cbn.com/";

# SQL templates filled in with %-interpolation by the save*/craw* helpers.
# NOTE(review): values are spliced straight into quoted literals, so any
# apostrophe in scraped text breaks the statement and this is an SQL
# injection vector -- callers should move to parameterized queries.
SUCCESSFUL_CRAWAL_INSERT="insert into jj97_sitenews(sitename,originalurl,newscontent,newstitle,createdate,newsdate)values('%s','%s','%s','%s',current_timestamp,to_date('%s','YYYY-MM-DD'))";
FAILED_CRAWAL_INSERT="insert into jj97_CrawalFailedUrl(originalurl,failedtimestamp,errortype)values('%s',current_timestamp,'%s')";
UNIQUE_FAILED_SELECT="select id from jj97_CrawalFailedUrl where originalurl='%s'";
UNIQUE_XINHUA_SELECT="select * from jj97_sitenews where originalurl='%s'";

# Timestamps are captured once at import time, not per crawl.
nowtime= datetime.datetime.now();
# NOTE(review): %d does not zero-pad, so March 5th yields "2024-3-5";
# Postgres to_date('YYYY-MM-DD') is lenient enough to parse it, but the
# strings are not canonical ISO dates.
newdirname='%d%d%d%d'%(nowtime.year,nowtime.month,nowtime.day,nowtime.hour);
SITE_NEWSDATE = '%d-%d-%d'%(nowtime.year,nowtime.month,nowtime.day);
currentdir = os.getcwd();
conn = None;  # replaced by a live psycopg2 connection at the bottom of the file
urlread = lambda url:urllib.urlopen(url).read();  # fetch a URL's raw bytes

def obtainUrlEncoding(url):
    """Download *url* and return the character encoding chardet guesses
    for its raw bytes (may be None if detection fails)."""
    payload = urlread(url);
    guess = chardet.detect(payload);
    return guess['encoding'];

def crawIfensi():
    """Scrape the ifensi.com front page and print the articles found in the
    'index_jiaodian' (focus) and 'index_bagua' (gossip) panels.

    NOTE(review): purely diagnostic at the moment -- titles, links and image
    URLs are only printed to stdout; nothing is persisted to the database.
    """
    # Pulls yyyy / mm / dd groups out of an article URL, if present.
    hrefPattern = re.compile(r'(\d{4})\D*(\d{2})\D*(\d{2})\D*');
    #spider = chilkat.CkSpider()
    url = "http://www.ifensi.com/";
    #spider.Initialize(url)

    #  Spider 5 URLs of this domain.
    #  but first, save the base domain in seenDomains
    # NOTE(review): range(0,1) means exactly one iteration -- the loop is a
    # leftover from the disabled chilkat spidering code above.
    for i in range(0,1):
        #success = spider.CrawlNext();

       # nowtime= datetime.datetime.now();
       # newfilename='%d%d%d%d%d%d%d'%(nowtime.year,nowtime.month,
       #                               nowtime.day,nowtime.hour,
       #                               nowtime.minute,nowtime.second,
       #                               nowtime.microsecond);
       # newfilename=newfilename+'.html';
        #seedUrls.Append(spider.lastUrl());
        # Front page is assumed to be UTF-8 encoded.
        lastedHtml = unicode(urllib.urlopen(url).read(),'utf-8');
       # try:
       #     fout = open(os.path.join(UPLOAD_DIR,newfilename), 'wb');
       #     fout.write(lastedHtml);
       #     fout.flush();
       # except IOError:
       #     print "ioerror";
       # finally:
       #     fout.close();

        soup = BeautifulSoup(lastedHtml);
        #print soup.originalEncoding;

        # --- "focus" panel: follow each news link and print title + images --
        hots = soup.findAll(name='div',attrs={"id":"index_jiaodian"})
        for hot in hots:
            print hot.h1.a.next;
            # Re-parsing the prettified fragment narrows the search scope to
            # this panel only.
            hotsoup = BeautifulSoup(hot.prettify());
            hothrefs =  hotsoup.findAll(name="li");
            for hothref in hothrefs:
                hrefs = BeautifulSoup(hothref.prettify());
                links = hrefs.findAll(name="a");
                for link in links:
                    # Only follow links into the news subdomain.
                    if(not link['href'].startswith("http://news")):
                        continue;

                    hrefSearch = hrefPattern.search(link['href']);
                    #print hrefSearch;
                    if(hrefSearch!=None):
                        # Date components embedded in the article URL.
                        print hrefSearch.groups()[0],hrefSearch.groups()[1],hrefSearch.groups()[2]

                    doc = urllib.urlopen(link['href']);
                    contenthref = BeautifulSoup(doc.read());
                    # Article body lives inside <div class="main2">.
                    contentdivs = contenthref.findAll(name="div",attrs={"class":"main2"});
                    for contentdiv in contentdivs:
                        content = BeautifulSoup(contentdiv.prettify());
                        title = content.find(name="div",attrs={"id":"title"});
                        print title.h1.next;
                        newscontent = content.find(name="div",attrs={"id":"content"});
                        images = BeautifulSoup(newscontent.prettify());

                        # Print every embedded image URL.
                        imagearray = images.findAll(name="img");
                        for image in imagearray:
                            print image['src'];
                    #print link['href'],link.next;

        # --- "gossip" panel: same traversal, but only prints the links ------
        buguas = soup.findAll(name='div',attrs={"id":"index_bagua"})
        for bagua in buguas:
            baguasoup = BeautifulSoup(bagua.prettify());
            baguahrefs =  baguasoup.findAll(name="li");
            for baguahref in baguahrefs:
                hrefs = BeautifulSoup(baguahref.prettify());
                links = hrefs.findAll(name="a");
                for link in links:
                    print link['href'];
                    doc = urllib.urlopen(link['href']);
                    contenthref = BeautifulSoup(doc.read());
                    contentdivs = contenthref.findAll(name="div",attrs={"class":"main2"});
                    for contentdiv in contentdivs:
                        content = BeautifulSoup(contentdiv.prettify());
                        title = content.find(name="div",attrs={"id":"title"});
                        print title.h1.next;
                        # NOTE(review): newscontent is extracted but never
                        # used or saved -- likely unfinished.
                        newscontent = content.find(name="div",attrs={"id":"content"});

def crawChinaCbn():
    """Scrape china-cbn.com's front page (tables Table10 / Table13) and fetch
    each linked article.

    NOTE(review): like crawIfensi, this only prints/extracts -- nothing is
    persisted; `content`/`savecontent` are computed and discarded.
    """
    #chinacbnspider = chilkat.CkSpider();
    cbnurl = "http://www.china-cbn.com/";
    #chinacbnspider.Initialize(cbnurl);
    # yyyymmdd run embedded in article URLs.
    cbnhrefPattern = re.compile(r'(\d{4})(\d{2})(\d{2})\D*');
    # Crude tag-stripping regex (matches any <...> tag, quote-aware).
    cbnp = re.compile("<(\"[^\"]*\"|'[^']*'|[^'\">])*>");
    # NOTE(review): range(0,1) -- single iteration, leftover spider loop.
    for i in range(0,1):
        #success = chinacbnspider.CrawlNext();
        lastedHtml = unicode(urllib.urlopen(cbnurl).read(),"utf-8");

        soup = BeautifulSoup(lastedHtml);

        ss = soup.findAll(name='table',attrs={"id":"Table10"})
        worldnewssoup = soup.findAll(name='table',attrs={"id":"Table13"})

        # --- world-news table (Table13): article pages decoded as UTF-8 -----
        for worldnews in worldnewssoup:
            todaylinks = BeautifulSoup(worldnews.prettify());
            hreflinks = todaylinks.findAll(name="a");
            for href in hreflinks:
                todayhref = href['href'];
                hrefcontent = href.next;
                hrefSearch = cbnhrefPattern.search(todayhref);
                #if(hrefSearch!=None):
                #    print hrefSearch.groups()[0],hrefSearch.groups()[1],hrefSearch.groups()[2]

                #print url+todayhref;
                # Resolve relative links against the site root.
                linkurl = "";
                doc = None;
                if(todayhref.startswith("http://")):
                    linkurl = todayhref;
                else:
                    linkurl = cbnurl+todayhref;

                doc = urllib.urlopen(linkurl);
                print linkurl;
                contenthref = BeautifulSoup(unicode(doc.read(),'utf-8'));
                #print contenthref.originalEncoding;
                #print contenthref.prettify();
                # NOTE(review): `content` is extracted but never used.
                content = contenthref.find(name='td',attrs={"style":"font-size:14px;padding:5"});

                print href['href'],href.next;


        # --- Table10: same walk, but article pages decoded as GBK -----------
        for s in ss:
            todaylinks = BeautifulSoup(s.prettify());
            hreflinks = todaylinks.findAll(name="a");
            for href in hreflinks:
                todayhref = href['href'];
                hrefcontent = href.next;
                hrefSearch = cbnhrefPattern.search(todayhref);
                #if(hrefSearch!=None):
                #    print hrefSearch.groups()[0],hrefSearch.groups()[1],hrefSearch.groups()[2]

                #print url+todayhref;
                linkurl = "";
                doc = None;
                if(todayhref.startswith("http://")):
                    linkurl = todayhref;
                else:
                    linkurl = cbnurl+todayhref;
                doc = urllib.urlopen(linkurl);
                #print linkurl;
                contentdiv = BeautifulSoup(unicode(doc.read(),"gbk"));
                #print contentdiv.prettify();
                content = contentdiv.find(name='td',attrs={"style":"font-size:14px;padding:5"});

                #print href['href'],href.next;
                # Strip markup from the article cell; NOTE(review): result is
                # discarded -- presumably meant to be saved to the DB.
                savecontent= cbnp.sub('',content.prettify());

        #if (success != True):
        #    print 'error!'
        #    break;
        
def crawXinhuaSport():
    xinhuahrefPattern = re.compile(r'(\d{4})\D*(\d{2})\D*(\d{2})\D*');
    cur = conn.cursor();
    for i in range(0,1):
        nowtime= datetime.datetime.now();
        lastedHtml = None;
        xinhuaencoding = "GBK";
       
        lastedHtml = unicode(urllib.urlopen("http://www.xinhuanet.com/sports/").read(),xinhuaencoding);

        soup = BeautifulSoup(lastedHtml);

        ss = soup.findAll(name='td',attrs={"height":"23"})

        for s in ss:
            div = BeautifulSoup(s.prettify());
            namediv= div.find(name='a',attrs={"class":"lan14"});
            if(namediv!=None):
                href = namediv["href"];
                #print div("b")[1];
                #print namediv,"\r\n,title",namediv.span.next;
                try:
                    cur.execute(UNIQUE_XINHUA_SELECT%(href));
                    onerow = cur.fetchone();
                    if(onerow is None):
                        doc = urllib.urlopen(href);
                        contentdiv = BeautifulSoup(unicode(doc.read(),xinhuaencoding));
                        hrefSearch = xinhuahrefPattern.search(href);
                        newsdate = SITE_NEWSDATE;
                        if(hrefSearch!=None):
                            newsdate=hrefSearch.groups()[0]+"-"+hrefSearch.groups()[1]+"-"+hrefSearch.groups()[2]
                
                        #print href,xinhuahrefPattern.search(href).groups();
                        content = contentdiv.find(name="div",attrs={"id":"Content"});
                        
                        #print '==========================\r\n';
                        titlediv = namediv.findAll(name='b');
                        saveSuccessUrl(CRAW_XINHUA_URL,href,unicode(str(content).strip(),'utf-8'),unicode(str(titlediv[1]),"utf-8"),newsdate);
                except UnicodeDecodeError:
                    print obtainUrlEncoding(href);
                    print 'UnicodeDecodeError:'+href;
                    conn.rollback();
                    saveFailedUrl(href,"UnicodeDecodeError");                    
                    continue;
                except Exception,err:
                    print 'unknowexception:'+href;
                    saveFailedUrl(href,unknowexception);
                    continue;
    cur.close();

def saveSuccessUrl(sitename,origialurl,content,urltitle,newsdate):
    """Insert one successfully crawled article into jj97_sitenews.

    Commits on success; on a database ProgrammingError rolls back and
    records the URL in the failed-URL table instead.  Uses the
    module-level connection ``conn``.
    """
    localcur = None;
    try:
        localcur = conn.cursor();
        # Parameterized query: scraped titles/content routinely contain
        # quotes, which broke the old %-interpolated SQL template and was
        # an injection vector.
        localcur.execute(
            "insert into jj97_sitenews"
            "(sitename,originalurl,newscontent,newstitle,createdate,newsdate)"
            "values(%s,%s,%s,%s,current_timestamp,to_date(%s,'YYYY-MM-DD'))",
            (sitename,origialurl,content,urltitle,newsdate));
        conn.commit();
    except ProgrammingError:
        conn.rollback();
        # BUG FIX: the original referenced the undefined name ``href`` here,
        # raising NameError instead of recording the failed URL.
        saveFailedUrl(origialurl,"ProgrammingError");
    finally:
        # Guard: cursor creation itself may have raised.
        if(localcur is not None):
            localcur.close();
    
def saveFailedUrl(href,errortype):
    """Record *href* in jj97_CrawalFailedUrl with *errortype*, once.

    A URL already present in the table is not inserted again.  Uses the
    module-level connection ``conn``; commits only when a row is added.
    """
    localcur = conn.cursor();
    try:
        # Parameterized queries: href comes from scraped pages and may
        # contain quotes (the old %-interpolated SQL broke on them).
        localcur.execute(
            "select id from jj97_CrawalFailedUrl where originalurl=%s",
            (href,));
        if(localcur.fetchone() is None):
            localcur.execute(
                "insert into jj97_CrawalFailedUrl"
                "(originalurl,failedtimestamp,errortype)"
                "values(%s,current_timestamp,%s)",
                (href,errortype));
            conn.commit();
    finally:
        # Close the cursor even when a statement raises (the original
        # leaked it on any exception).
        localcur.close();

def crawNews():
    """Run the enabled site crawlers.

    Only the Xinhua sports crawler is active; the ifensi and China-CBN
    crawlers remain disabled (they only print, persisting nothing).
    """
    crawXinhuaSport();
    

# Open the shared module-level connection used by all crawl/save helpers.
# NOTE(review): credentials are hard-coded -- move to config/env before
# sharing this file.
conn =  psycopg2.connect("dbname='djangodb' user='postgres' host='localhost' password='041209'");
try:
    crawNews();
finally:
    # BUG FIX: the original closed the connection only on the success path,
    # leaking it whenever a crawler raised.
    conn.close();

