#coding=utf-8
#import chilkat
import re
from BeautifulSoup import BeautifulSoup 
import datetime
import os, sys
import codecs
import urllib
import psycopg2
import chardet
from psycopg2 import ProgrammingError

# psycopg2-style DSN (currently unused: getConnection() hard-codes its own).
# NOTE(review): psycopg2 expects space-separated "key=value" pairs
# ("dbname=postgres user=postgres ..."), not semicolons — fix before using.
DSN = 'dbname=postgres;username=postgres;password=postgres'

# Front pages visited by CrawalNews.crawNews().
CRAW_IFENSI_URL = "http://www.ifensi.com/"
CRAW_XINHUA_URL = "http://www.xinhuanet.com/sports/"
CRAW_CHINACBN_URL = "http://www.china-cbn.com/"

# %-interpolated SQL templates.  Values are spliced directly into the SQL
# text, so callers must sanitise quotes before formatting (see saveSuccessUrl).
SUCCESSFUL_CRAWAL_INSERT = "insert into jj97_sitenews(sitename,originalurl,newscontent,newstitle,createdate,newsdate,newstype,shuttitle)values('%s','%s','%s','%s',current_timestamp,to_date('%s','YYYY-MM-DD'),'%s','%s')"
FAILED_CRAWAL_INSERT = "insert into jj97_CrawalFailedUrl(originalurl,failedtimestamp,errortype)values('%s',current_timestamp,'%s')"
UNIQUE_FAILED_SELECT = "select id from jj97_CrawalFailedUrl where originalurl='%s'"
UNIQUE_SUCCESSFUL_SELECT = "select * from jj97_sitenews where originalurl='%s'"

# Fallback page encoding when a site's encoding is not detected.
DEFAULT_ENCODING = "gbk"

# Crawl start time; SITE_NEWSDATE is the default article date when no date
# can be parsed out of a link.  strftime zero-pads month/day so the string
# always matches the 'YYYY-MM-DD' mask handed to to_date() — the old
# '%d-%d-%d' format produced e.g. '2010-3-5'.
sitetime = datetime.datetime.now()
SITE_NEWSDATE = sitetime.strftime('%Y-%m-%d')


def urlread(url):
    """Return the raw (undecoded) bytes of the page at *url*."""
    return urllib.urlopen(url).read()


def replaceAllHTMLTag(oldstring):
    """Strip every HTML/XML tag from *oldstring* and return the text."""
    return re.sub(r"<(\"[^\"]*\"|'[^']*'|[^'\">])*>", "", oldstring)


def replaceAllBlankChar(oldstring):
    """Remove all whitespace characters from *oldstring*."""
    return re.sub(r"\s", "", oldstring)
class CrawalNews:
    """Crawler that pulls news articles from ifensi.com (entertainment),
    china-cbn.com (economy) and xinhuanet.com sports, storing successes in
    the jj97_sitenews table and failed URLs in jj97_CrawalFailedUrl.
    """

    # Lazily created psycopg2 connection shared by all DB helpers.
    conn = None
    # Matches any HTML/XML tag (used to strip markup from china-cbn bodies).
    TAG_PATTERN = re.compile("<(\"[^\"]*\"|'[^']*'|[^'\">])*>")

    def __init__(self):
        pass

    def obtainUrlEncoding(self, url):
        """Detect and return the character encoding of the page at *url*."""
        return chardet.detect(urlread(url))['encoding']

    def _dateFromHref(self, href, pattern):
        """Extract 'YYYY-MM-DD' from *href* using *pattern* (three groups:
        year, month, day); fall back to today's SITE_NEWSDATE."""
        match = pattern.search(href)
        if match is not None:
            return "%s-%s-%s" % match.groups()
        return SITE_NEWSDATE

    def _asUnicode(self, value):
        """Return *value* as unicode text.  BeautifulSoup 3 tags str() to
        utf-8 bytes; NavigableStrings may already be unicode — the old
        unconditional unicode(str(...), 'utf-8') blew up on those."""
        if isinstance(value, unicode):
            return value
        return unicode(str(value), "utf-8")

    # ---------------------------------------------------------------- ifensi
    def crawIfensi(self):
        """Crawl the ifensi.com front page: the 'index_jiaodian' (focus) and
        'index_bagua' (gossip) sections; each new article is saved with
        newstype 'entertainment'."""
        hrefPattern = re.compile(r'(\d{4})\D*(\d{2})\D*(\d{2})\D*')
        try:
            lastedHtml = unicode(urllib.urlopen(CRAW_IFENSI_URL).read(),
                                 DEFAULT_ENCODING)
            soup = BeautifulSoup(lastedHtml)

            for hot in soup.findAll(name='div', attrs={"id": "index_jiaodian"}):
                print(hot.h1.a.next)
                for item in BeautifulSoup(hot.prettify()).findAll(name="li"):
                    for link in BeautifulSoup(item.prettify()).findAll(name="a"):
                        # Focus links must point into the news subdomain.
                        if not link['href'].startswith("http://news"):
                            continue
                        # was: urlIfExist(...) without self -> NameError
                        if self.urlIfExist(link['href']):
                            continue
                        self._crawlIfensiArticle(link['href'], hrefPattern, 'hot')

            for bagua in soup.findAll(name='div', attrs={"id": "index_bagua"}):
                for item in BeautifulSoup(bagua.prettify()).findAll(name="li"):
                    for link in BeautifulSoup(item.prettify()).findAll(name="a"):
                        if self.urlIfExist(link['href']):
                            continue
                        self._crawlIfensiArticle(link['href'], hrefPattern, 'bagua')
        except IOError:
            self.printLog("connect url is error:")
        except UnicodeDecodeError:
            self.printLog("craw site encoding:" +
                          self.obtainUrlEncoding(CRAW_IFENSI_URL))

    def _crawlIfensiArticle(self, href, hrefPattern, section):
        """Fetch one ifensi article, extract title/content from its 'main2'
        div and persist it; failures are recorded and never propagate."""
        newsdate = self._dateFromHref(href, hrefPattern)
        try:
            page = BeautifulSoup(unicode(urllib.urlopen(href).read(),
                                         DEFAULT_ENCODING))
            for contentdiv in page.findAll(name="div", attrs={"class": "main2"}):
                block = BeautifulSoup(contentdiv.prettify())
                title = block.find(name="div", attrs={"id": "title"})
                self.printLog(title.h1.next)
                newscontent = block.find(name="div", attrs={"id": "content"})
                headline = self._asUnicode(title.h1.next)
                self.saveSuccessUrl(CRAW_IFENSI_URL, href,
                                    self._asUnicode(newscontent).strip(),
                                    headline, newsdate, 'entertainment',
                                    headline)
        except UnicodeDecodeError:
            self.printLog(self.obtainUrlEncoding(href))
            self.printLog('UnicodeDecodeError:' + href)
            self.saveFailedUrl(href, "UnicodeDecodeError")
        except Exception as err:
            # was: saveFailedUrl(...) without self -> NameError inside handler
            print(section + ' unknowexception:' + href)
            self.saveFailedUrl(href, "unknowexception")

    # ------------------------------------------------------------- china-cbn
    def crawChinaCbn(self):
        """Crawl the china-cbn.com front page tables 'Table13' (world news)
        and 'Table10'; each new article is saved with newstype 'economy'."""
        cbnhrefPattern = re.compile(r'(\d{4})(\d{2})(\d{2})\D*')
        try:
            print(self.obtainUrlEncoding(CRAW_CHINACBN_URL))
            soup = BeautifulSoup(unicode(urllib.urlopen(CRAW_CHINACBN_URL).read(),
                                         "gbk"))

            for table in soup.findAll(name='table', attrs={"id": "Table13"}):
                self._crawlCbnTable(table, cbnhrefPattern, False, 'worldnews ')
            for table in soup.findAll(name='table', attrs={"id": "Table10"}):
                # Table10 bodies are stored with all HTML tags stripped.
                self._crawlCbnTable(table, cbnhrefPattern, True, '')
        except IOError:
            self.printLog("connect url is error:")
        except UnicodeDecodeError:
            # was: CRAW_CHINACBD_URL (undefined name -> NameError)
            self.printLog("encoding:" + self.obtainUrlEncoding(CRAW_CHINACBN_URL))

    def _crawlCbnTable(self, table, pattern, stripTags, logtag):
        """Persist every new article linked from *table*.  *stripTags*
        removes HTML markup from the stored body; *logtag* prefixes log
        messages so the two tables can be told apart."""
        for anchor in BeautifulSoup(table.prettify()).findAll(name="a"):
            todayhref = anchor['href']
            newsdate = self._dateFromHref(todayhref, pattern)
            # Relative links are resolved against the site root.
            if todayhref.startswith("http://"):
                linkurl = todayhref
            else:
                linkurl = CRAW_CHINACBN_URL + todayhref
            try:
                if self.urlIfExist(linkurl):
                    continue
                page = BeautifulSoup(unicode(urllib.urlopen(linkurl).read(),
                                             "gbk"))
                content = page.find(name='td',
                                    attrs={"style": "font-size:14px;padding:5"})
                title = page.find(name="font", attrs={"class": "NewsTitle"})
                body = self._asUnicode(content)
                if stripTags:
                    body = self._asUnicode(
                        self.TAG_PATTERN.sub('', content.prettify()))
                headline = self._asUnicode(title.next)
                self.saveSuccessUrl(CRAW_CHINACBN_URL, linkurl, body.strip(),
                                    headline, newsdate, 'economy', headline)
            except UnicodeDecodeError:
                # was: obtainUrlEncoding(...) without self in one branch
                self.printLog(self.obtainUrlEncoding(linkurl))
                self.printLog(logtag + 'UnicodeDecodeError:%s' % linkurl)
                self.saveFailedUrl(linkurl, "UnicodeDecodeError")
            except Exception as err:
                self.printLog(logtag + 'unknowexception:%s' % linkurl)
                self.saveFailedUrl(linkurl, "unknowexception")

    # ---------------------------------------------------------------- xinhua
    def crawXinhuaSport(self):
        """Crawl the xinhuanet.com sports front page; each new article is
        saved with newstype 'sports'."""
        xinhuahrefPattern = re.compile(r'(\d{4})\D*(\d{2})\D*(\d{2})\D*')
        try:
            lastedHtml = unicode(urllib.urlopen(CRAW_XINHUA_URL).read(), "GBK")
            soup = BeautifulSoup(lastedHtml)

            for cell in soup.findAll(name='td', attrs={"height": "23"}):
                namediv = BeautifulSoup(cell.prettify()).find(
                    name='a', attrs={"class": "lan14"})
                if namediv is None:
                    continue
                href = namediv["href"]
                try:
                    if self.urlIfExist(href):
                        continue
                    doc = urllib.urlopen(href)
                    # short title: second <b> element of the anchor
                    bsoup = BeautifulSoup(namediv.findAll(name='b')[1].prettify())
                    contentdiv = BeautifulSoup(unicode(doc.read(), "GBK"))
                    newsdate = self._dateFromHref(href, xinhuahrefPattern)
                    content = contentdiv.find(name="div", attrs={"id": "Content"})
                    titlediv = namediv.findAll(name='b')
                    self.saveSuccessUrl(CRAW_XINHUA_URL, href,
                                        self._asUnicode(content).strip(),
                                        self._asUnicode(titlediv[1]),
                                        newsdate, 'sports',
                                        self._asUnicode(bsoup))
                except UnicodeDecodeError:
                    self.printLog(self.obtainUrlEncoding(href))
                    self.printLog('crawXinhuaSport UnicodeDecodeError:%s' % href)
                    self.saveFailedUrl(href, "UnicodeDecodeError")
                except Exception as err:
                    self.printLog('crawXinhuaSport unknowexception:%s' % href)
                    self.saveFailedUrl(href, 'unknowexception')
        except IOError:
            self.printLog("connect url is error:")
        except UnicodeDecodeError:
            self.printLog("encoding:" + self.obtainUrlEncoding(CRAW_XINHUA_URL))

    # ------------------------------------------------------------ persistence
    def saveSuccessUrl(self, sitename, origialurl, content, urltitle,
                       newsdate, newstype, shuttitle):
        """Insert one crawled article into jj97_sitenews.  On a SQL error
        the transaction is rolled back and the URL recorded as failed."""
        localconn = self.getConnection()
        # Legacy sanitising kept so stored data matches the old behaviour.
        content = content.replace("'", "")
        urltitle = urltitle.replace("'", "")

        # Keep only the text after the last embedded </script> block.
        while True:
            index = content.find("</script>")
            if index == -1:
                break
            content = content[index + len("</script>"):]

        localcur = None
        try:
            localcur = localconn.cursor()
            # Parameterized query: crawled page text is untrusted; the old
            # %-interpolated SQL string was open to SQL injection.
            localcur.execute(
                "insert into jj97_sitenews(sitename,originalurl,newscontent,"
                "newstitle,createdate,newsdate,newstype,shuttitle)"
                "values(%s,%s,%s,%s,current_timestamp,"
                "to_date(%s,'YYYY-MM-DD'),%s,%s)",
                (sitename, origialurl, content,
                 replaceAllBlankChar(replaceAllHTMLTag(urltitle)),
                 newsdate, newstype,
                 replaceAllBlankChar(replaceAllHTMLTag(shuttitle))))
            localconn.commit()
            self.printLog("save successful!")
        except ProgrammingError:
            self.printLog("save failed!")
            localconn.rollback()
            self.saveFailedUrl(origialurl, "ProgrammingError")
        finally:
            if localcur is not None:
                localcur.close()

    def saveFailedUrl(self, href, errortype):
        """Record *href* in jj97_CrawalFailedUrl (at most once per URL)."""
        localconn = self.getConnection()
        localcur = None
        try:
            localcur = localconn.cursor()
            localcur.execute(
                "select id from jj97_CrawalFailedUrl where originalurl=%s",
                (href,))
            if localcur.fetchone() is None:
                localcur.execute(
                    "insert into jj97_CrawalFailedUrl(originalurl,"
                    "failedtimestamp,errortype)values(%s,current_timestamp,%s)",
                    (href, errortype))
                localconn.commit()
        except Exception as err:
            # was: localconn.roleback() — misspelled, raised AttributeError
            localconn.rollback()
        finally:
            if localcur is not None:
                localcur.close()

    def crawNews(self):
        """Run all three site crawlers in sequence."""
        self.crawIfensi()
        self.crawChinaCbn()
        self.crawXinhuaSport()

    def urlIfExist(self, url):
        """Return True when *url* is already stored in jj97_sitenews."""
        localcur = self.getConnection().cursor()
        try:
            localcur.execute(
                "select * from jj97_sitenews where originalurl=%s", (url,))
            exists = localcur.fetchone() is not None
            if not exists:
                self.printLog("if exist!")
            # was: bResult stayed unbound when execute() raised, so the
            # return after the finally block hit a NameError
            return exists
        finally:
            localcur.close()

    def getConnection(self):
        """Return the shared DB connection, creating it on first use."""
        if self.conn is None:
            self.conn = psycopg2.connect(
                "dbname='djangodb' user='postgres' host='localhost' password='postgres'")
            self.printLog('get new')
        else:
            self.printLog('get old')
        return self.conn

    def printLog(self, msg):
        """Print *msg* prefixed with a zero-padded YYYYMMDD-HH timestamp."""
        stamp = datetime.datetime.now().strftime('%Y%m%d-%H')
        print('%s message is:%s' % (stamp, msg))

    def closeConnection(self):
        """Close the shared connection and reset it so a later
        getConnection() reconnects (the old code cached the closed one)."""
        if self.conn is not None:
            self.conn.close()
            self.conn = None
if __name__ == "__main__":
    # Entry point: crawl all three sites, then release the shared DB
    # connection.  The finally ensures the connection is closed even when a
    # crawler raises (the old code skipped closeConnection on error).
    crawal = CrawalNews()
    try:
        crawal.crawNews()
    finally:
        crawal.closeConnection()
    

