#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: bruce
# Email: lixiangning888@gmail.com
# Description: collect news from news.sina.com and store into mysql databases
# If you find any problem in the code, please send an email.

import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import urllib2
import re
import time
import pprint
import cStringIO
import formatter
import MySQLdb
from BeautifulSoup import BeautifulSoup
from HTMLParser import HTMLParser
from urlparse import urlparse

pp = pprint.PrettyPrinter()

class sina:
    """Crawler for news.sina.cn article pages.

    Downloads list pages, extracts article links, parses each article's
    title/time/author/body, and stores them in a local MySQL database.
    URLs already present in the `collect_repeat` table are skipped so
    repeated runs only collect new articles.
    """

    def __init__(self):
        # Entry-point list pages; both carry the 'sa' channel parameter
        # that filter_real_url_and_add_params() relies on.
        self.url = {
            'homelist': 'http://news.sina.cn/?sa=d1t110v414&vt=4',
            'guonei': 'http://news.sina.cn/?sa=t141d48v509&vt=4'
        }
        self.db = self.initDB()
        self.initHTMLopener()

    def initDB(self):
        """Connect to MySQL and create the working tables on first run.

        Keeps the connection on self.conn (for commits) and returns a
        cursor, which the rest of the class uses for execute/fetch.
        """
        self.conn = MySQLdb.connect(host="localhost", user="root",
                                    passwd="ironport", db="news",
                                    charset="utf8")
        db = self.conn.cursor()
        # Duplicate-detection table: one row per already-crawled URL.
        db.execute('''create table if not exists `collect_repeat`
                   (`id` int(20) not null primary key auto_increment,
                   `url` text)
                   DEFAULT CHARSET=utf8''')
        # Article storage. The `url` column is required because
        # insert_news() stores the source link as a seventh field; the
        # original 6-column schema made every insert fail.
        # NOTE(review): if an old 6-column `news` table already exists,
        # it must be dropped or ALTERed to add `url` text.
        db.execute('''create table if not exists `news`
                   (`id` int(20) not null primary key auto_increment,
                   `title` text,
                   `author` text,
                   `time` text,
                   `content` text,
                   `tag` text,
                   `url` text)
                   DEFAULT CHARSET=utf8''')
        self.conn.commit()
        return db

    def initHTMLopener(self):
        """Install a global urllib2 opener with a browser User-Agent.

        Some sites serve different (or no) markup to unknown agents, so
        every subsequent urllib2.urlopen() call uses this identity.
        """
        opener = urllib2.build_opener()
        user_agent = 'Mozilla/5.0 (X11; Linux i686; rv:7.0.1) Gecko/20100101 Firefox/7.0.1'
        opener.addheaders = [('User-agent', user_agent)]
        urllib2.install_opener(opener)

    def get_html(self, url_base):
        """Download url_base and return the page as a unicode string.

        Uses lenient utf-8 decoding (consistent with get_utf8) so a few
        stray non-utf8 bytes do not abort the whole crawl.
        """
        response = urllib2.urlopen(url_base).read()
        return self.get_utf8(response)

    def get_url(self, response):
        """Return every absolute http:// link found in the page."""
        soup = BeautifulSoup(response)
        return [link.get('href')
                for link in soup.findAll('a', attrs={'href': re.compile("^http://")})]

    def filter_real_url_and_add_params(self, urllist):
        """Keep only article links from accepted Sina channels.

        A link qualifies when its query string starts with 'sa=', its
        host's first label is in the channel whitelist, and the start of
        the 'sa' value matches the channel-code pattern (e.g. 't110v').
        Qualifying links are rebuilt as 'http://host/?query'.
        """
        accept_list = ['news', 'sports', 'mil', 'tech', 'finance', '2012']
        channel_pat = re.compile(r'^t\d{2,3}v+')
        filter_url_list = []
        for url in urllist:
            parsed = urlparse(url)
            netloc, query = parsed[1], parsed[4]
            if (query.find('sa=') == 0
                    and netloc.split('.')[0] in accept_list
                    and channel_pat.match(query.split('=')[1][0:5]) is not None):
                filter_url_list.append("http://%s/?%s" % (netloc, query))
        return filter_url_list

    def strip_tags(self, html):
        """Reduce an HTML fragment to plain text.

        Removes CR/LF/tabs, all tags, leftover '>' runs, and inline
        'var ...' javascript assignments that survive tag stripping.
        """
        text = re.sub(r'\r+|\n+|\t+', "", html)
        text = re.sub(r'<[^>]+>', "", text)
        text = re.sub(r'>+', "", text)
        return re.sub(r'var\s.*', "", text)

    def get_info_from_page(self, real_link_list):
        """Fetch each article page, parse it, and store it in the DB.

        Pages already recorded in collect_repeat are skipped. Pages
        whose <h4> header (the date/time/source line) is missing or has
        fewer than three fields are skipped, since time and author can
        not be extracted reliably from them.
        """
        for link in real_link_list:
            print(link)
            if self.check_already_exist(link):
                continue
            html = self.get_html(link)
            # Record the URL before parsing so a parse failure does not
            # cause the same page to be re-fetched on every run.
            self.insert_symbol(link)
            soup = BeautifulSoup(html, fromEncoding="utf-8")
            title = soup.html.head.title.string
            # The <h4> is assumed to hold "date time source" separated
            # by spaces -- TODO confirm against current sina markup.
            time_from = self.strip_tags(str(soup.body.h4))
            if time_from is None or time_from == "" or time_from == "None":
                print("time header is missing, skipping page")
                continue
            parts = time_from.split(" ")
            if len(parts) < 3:
                # Malformed header: not enough fields for date/author.
                continue
            # Renamed from `time` to avoid shadowing the time module.
            pub_time = "%s %s" % (parts[0], parts[2])
            author = parts[-1]
            contentdiv = soup.body.find('div', {'id': 'content'})
            content = self.strip_tags(str(contentdiv))
            self.insert_news(title, author, pub_time, content, "", link)

    def get_utf8(self, CR):
        """Decode raw bytes as utf-8, silently dropping invalid bytes."""
        return unicode(CR, "utf-8", 'ignore')

    def insert_symbol(self, link):
        """Mark `link` as collected in the collect_repeat table."""
        # Parameterized query: the URL comes from scraped pages and must
        # not be interpolated into the SQL string.
        self.db.execute('insert into collect_repeat (url) values (%s)',
                        (link,))
        self.conn.commit()

    def insert_news(self, title="", author="", time="", content="", tag="", link=""):
        """Store one parsed article row in the news table.

        All values are scraped, untrusted text, so they are passed as
        DB-API parameters instead of being formatted into the SQL
        (the old string-built insert broke on any quote and also tried
        to put 7 values into a 6-column table).
        """
        self.db.execute('''insert into news
                        (title, author, time, content, tag, url)
                        values (%s, %s, %s, %s, %s, %s)''',
                        (title, author, time, content, tag, link))
        self.conn.commit()

    def check_already_exist(self, link):
        """Return a truthy count if `link` was already collected."""
        self.db.execute('select count(id) from collect_repeat where url=%s',
                        (link,))
        r = self.db.fetchone()
        return r[0]


if __name__ == '__main__':
    # Crawl the first two pages of the Sina mobile home news list:
    # page 1 is the bare list URL, later pages append a &cpage=N marker.
    collector = sina()
    base_url = collector.url['homelist']
    for page in range(1, 3):
        page_url = base_url if page == 1 else base_url + "&cpage=" + str(page)
        print(page_url)
        page_html = collector.get_html(page_url)
        candidate_links = collector.get_url(page_html)
        article_links = collector.filter_real_url_and_add_params(candidate_links)
        collector.get_info_from_page(article_links)
