#!/usr/bin/env python -W ignore
# -*- coding: utf-8 -*-
# Author: bruce
# Email: lixiangning888@gmail.com
# Description: collect news from news.baidu.com and store into mysql databases
# If you find any problem in the code, please send an email

import sys
# Python 2 hack: reload() restores sys.setdefaultencoding (hidden by site.py)
# so the implicit str<->unicode codec can be forced to UTF-8 for the whole run.
reload(sys)
sys.setdefaultencoding('utf-8')
import re
import pprint
import feedparser  # third-party: RSS/Atom feed parsing
import encodings   # stdlib codec registry; used to alias gb2312 -> gb18030
import MySQLdb     # third-party: MySQL driver
from calculate_keywords_frequency import keywords_frequency  # project-local; only used by commented-out tagging code below

# Pretty-printer kept around for ad-hoc debugging output.
pp = pprint.PrettyPrinter()

#encodings.aliases.aliases['gb2312'] = 'gb18030'

#python_wiki_rss_url = "http://news.baidu.com/n?cmd=4&class=internews&tn=rss"

#feed = feedparser.parse( python_wiki_rss_url )

#print(feed.encoding)

#for news in feed['entries']:
#    print news.title
#    print news.author
#    print news.updated
#    print news.summary
#    print news.links[0]['href']

class baidu:
    """Collect news from news.baidu.com RSS feeds and store them in MySQL.

    Two tables are maintained (created on first run):
      * collect_repeat -- URLs already fetched, used to skip duplicates.
      * news           -- title/author/time/content/tag/link of each item.
    """

    def __init__(self):
        # One RSS feed per news category to crawl.
        self.url = ['http://news.baidu.com/n?cmd=4&class=civilnews&tn=rss',
                    'http://news.baidu.com/n?cmd=4&class=internews&tn=rss',
                    'http://news.baidu.com/n?cmd=4&class=mil&tn=rss',
                    'http://news.baidu.com/n?cmd=4&class=finannews&tn=rss',
                    'http://news.baidu.com/n?cmd=4&class=internet&tn=rss',
                    'http://news.baidu.com/n?cmd=4&class=housenews&tn=rss',
                    'http://news.baidu.com/n?cmd=4&class=autonews&tn=rss',
                    'http://news.baidu.com/n?cmd=4&class=sportnews&tn=rss',
                    'http://news.baidu.com/n?cmd=4&class=enternews&tn=rss',
                    'http://news.baidu.com/n?cmd=4&class=gamenews&tn=rss',
                    'http://news.baidu.com/n?cmd=4&class=edunews&tn=rss',
                    'http://news.baidu.com/n?cmd=4&class=healthnews&tn=rss',
                    'http://news.baidu.com/n?cmd=4&class=technnews&tn=rss',
                    'http://news.baidu.com/n?cmd=4&class=socianews&tn=rss']
        # Baidu declares gb2312 but actually serves gb18030; alias the codec
        # so feedparser decodes the feeds correctly.
        encodings.aliases.aliases['gb2312'] = 'gb18030'
        self.db = self.initDB()

    def initDB(self):
        '''Connect to MySQL, create the tables if missing, and return a cursor.

        Also stores the connection on self.conn so later commits go through
        the DB-API conn.commit() call instead of executing a literal COMMIT
        statement on the cursor.
        '''
        self.conn = MySQLdb.connect(host="localhost", user="root",
                                    passwd="root", db="news", charset="utf8")
        db = self.conn.cursor()
        # Dedup table: one row per already-collected article URL.
        collect_repeat_sql = '''create table if not exists `collect_repeat`
                             (`id` int(20) not null primary key auto_increment,
                             `url` text)
                             DEFAULT CHARSET=utf8'''
        db.execute(collect_repeat_sql)
        news_sql = '''create table if not exists `news`
                   (`id` int(20) not null primary key auto_increment,
                   `title` text,
                   `author` text,
                   `time` text,
                   `content` text,
                   `tag` text,
                   `rel_link` text)
                   DEFAULT CHARSET=utf8'''
        db.execute(news_sql)
        self.conn.commit()
        return db

    def insert_symbol(self, link):
        '''Record *link* in collect_repeat so it is not collected again.

        Parameterized query: the original interpolated the URL straight into
        the SQL string (injection-prone, breaks on quotes) and inserted ''
        into the auto_increment id column; naming the column lets MySQL
        assign the id.
        '''
        self.db.execute('insert into collect_repeat (url) values (%s)', (link,))
        self.conn.commit()

    def insert_news(self, title="", author="", time="", content="", tag="", link=""):
        '''Insert one news item. All fields are optional strings.

        Parameterized to avoid SQL injection; the driver handles quoting, so
        the previous re.escape() workaround (a regex escaper that corrupted
        content by backslashing regex metacharacters) is no longer needed.
        '''
        sql = ('insert into news (title, author, time, content, tag, rel_link) '
               'values (%s, %s, %s, %s, %s, %s)')
        self.db.execute(sql, (title, author, time, content, tag, link))
        self.conn.commit()

    def check_already_exist(self):
        '''Return the list of article URLs already stored in collect_repeat.'''
        self.db.execute('select url from collect_repeat')
        return [row[0] for row in self.db.fetchall()]

    def strip_tags(self, html):
        '''Strip HTML tags and CR/LF/TAB control characters from *html*.'''
        text = re.sub(r'\r+|\n+|\t+', '', html)
        return re.sub(r'<[^>]+>', '', text)

    def process_news(self, link):
        '''Fetch the RSS feed at *link* and store every not-yet-seen entry.'''
        feed = feedparser.parse(link)
        # Build the dedup set once so the per-entry membership test is O(1)
        # instead of a linear scan of a list for every entry.
        seen = set(self.check_already_exist())
        for news in feed['entries']:
            url = news.links[0]['href']
            if url in seen:
                continue
            self.insert_symbol(url)
            summary = self.strip_tags(news.summary)
            # Some feed entries lack author/published; default to '' instead
            # of raising AttributeError mid-crawl.
            self.insert_news(news.title, news.get('author', ''),
                             news.get('published', ''), summary, "", url)

    def update_strip_content(self):
        '''One-off cleanup: strip HTML tags from every stored news content.

        Bug fix: the original never selected the row id and formatted the
        builtin ``id`` function into the UPDATE via %i, which raised a
        TypeError at runtime. Select id alongside content and parameterize
        the UPDATE.
        '''
        self.db.execute('select id, content from news')
        for row_id, content in self.db.fetchall():
            self.db.execute('update news set content = %s where id = %s',
                            (self.strip_tags(content), row_id))
        self.conn.commit()
            
    
if __name__ == '__main__':
    # Build the collector (connects to MySQL and creates tables on first run).
    collector = baidu()
    # One-off maintenance pass to strip HTML already stored in the DB:
    #collector.update_strip_content()
    # Crawl every configured Baidu RSS feed.
    for feed_url in collector.url:
        collector.process_news(feed_url)
 
