#!/usr/bin/env python 
#coding: utf-8

############################
__author__ = "kauu (kauu@yahoo.cn)"
__version__ = "1.0"
__copyright__ = "Copyright (c) 2008 "
__license__ = "NO"



import re,urllib2, time
from BeautifulSoup import BeautifulSoup
import logging as log
import md5,os
from datetime import datetime

from crawldb import DB_crawl as DB

def rss(url,db = None):
	if not url : 
		log.warn('no rss url path ...')	
		return 1
	if not db :
		log.error('no db connection !!')
		return 1
	import feedparser
	rss = feedparser.parse(url)
	for item in rss['entries']:
		title =  item['title']
		summary =  item['summary']
		publish =  item['updated_parsed']
		url = item['link']
		md = md5.new(url).hexdigest()
		content = parse_content(url)
		print len(content)
		record={'title':title.encode('utf-8'),
			'content':content}
		#	'publish':datetime.now(),
		#	'init_url':url,
		#	'md5':md}
		try:
			smail(title.encode('utf-8'),content)
			time.sleep(1)
			#print record
			print '+'*80
		 #	db.insert('article',record)
		except Exception,string:
			print string
		else:
			print 'send  OK!!!'
		

def parse_content(url):
	"""Download *url* and return the inner HTML of its 'blogstory' div.

	Children that contain the prev/next-post navigation text or a
	<script> tag are dropped.  NOTE(review): if the page has no
	<div class="blogstory">, find() returns None and iterating it
	raises -- assumed not to happen for these blog pages.
	"""
	html = crawl(url)
	soup = BeautifulSoup(html)
	story = soup.find('div', {'class': 'blogstory'})
	parts = []
	for child in story:
		text = str(child)	# convert once; original called str() twice per child
		# skip the "newer post ... older post" navigation and any scripts
		if re.search(r'新一篇.*旧一篇', text) or re.search(r'<script', text):
			continue
		parts.append(text)
	# join once instead of quadratic `con += str(cc)` accumulation
	return ''.join(parts)
import email
import mimetypes
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEImage import MIMEImage
import smtplib

def sendEmail(authInfo, fromAdd, toAdd, subject, plainText, htmlText):

        strFrom = fromAdd
        strTo = ', '.join(toAdd)

        server = authInfo.get('server')
        user = authInfo.get('user')
        passwd = authInfo.get('password')

        if not (server and user and passwd) :
                print 'incomplete login info, exit now'
                return

        # 设定root信息
        msgRoot = MIMEMultipart('related')
        msgRoot['Subject'] = subject
        msgRoot['From'] = strFrom
        msgRoot['To'] = strTo
        msgRoot.preamble = 'This is a multi-part message in MIME format.'

        # Encapsulate the plain and HTML versions of the message body in an
        # 'alternative' part, so message agents can decide which they want to display.
        msgAlternative = MIMEMultipart('alternative')
        msgRoot.attach(msgAlternative)

        #设定纯文本信息
        msgText = MIMEText(plainText, 'plain', 'utf-8')
        msgAlternative.attach(msgText)

        #设定HTML信息
        msgText = MIMEText(htmlText, 'html', 'utf-8')
        msgAlternative.attach(msgText)

       #设定内置图片信息
       #fp = open('test.jpg', 'rb')
       #msgImage = MIMEImage(fp.read())
       #fp.close()
       #msgImage.add_header('Content-ID', '<image1>')
       #msgRoot.attach(msgImage)

       #发送邮件
        smtp = smtplib.SMTP()
       #设定调试级别，依情况而定
        smtp.set_debuglevel(1)
        smtp.connect(server)
        smtp.login(user, passwd)
        smtp.sendmail(strFrom, strTo, msgRoot.as_string())
        smtp.quit()
        return

def smail(subject, content):
	"""Mail *content* as an HTML message with *subject*.

	Thin wrapper around sendEmail() with a fixed account and recipient.
	SECURITY: the SMTP credentials are hard-coded in source -- move them
	to a config file or environment variables before sharing this code.
	"""
	authInfo = {
		'server': 'smtp.163.com',
		'user': 'kauu',
		'password': 'nice12193355',
	}
	fromAdd = 'kauu@163.com'
	toAdd = ['babatu@gmail.com']
	plainText = ''	# no plain-text alternative is provided
	sendEmail(authInfo, fromAdd, toAdd, subject, plainText, content)
def crawl(url, values=None):
	"""POST the form fields *values* (url-encoded) to *url*, return the body.

	values defaults to the CSDN homepage-pagination form (page 2).
	"""
	# BUGFIX: the value must not repeat the "User-Agent:" header name
	# (urllib2 already emits it), and a space was missing before "Gecko".
	user_agent = "Mozilla/5.0 " \
			+ "(Windows; U; Windows NT 5.1: en-GB;rv:1.8.1.4) " \
			+ "Gecko/20070515 Firefox/2.0.0.4"
	header = {'User-Agent': user_agent}
	if values is None:	# `is None`, not `== None`
		values = {'__EVENTTARGET': 'homepage.ascx:PaginationUp',
			'__EVENTARGUMENT': 'pi=2',
			'__VIEWSTATE': '',
			'Search:AddlSearchScope': 'all'}

	import urllib
	data = urllib.urlencode(values)
	request = urllib2.Request(url, data, header)
	socket = urllib2.urlopen(request)
	try:
		return socket.read()
	finally:
		socket.close()	# close even when read() raises

def parsePage():
	"""Walk the first 11 listing pages of the blog and mail every article.

	For each 'user_article' div it extracts the full-text link and the
	title, fetches the article body and sends it via smail().
	"""
	url = 'http://blog.csdn.net/kauu'
	values = {'__EVENTTARGET': 'homepage.ascx:PaginationUp',
		'__VIEWSTATE': '',
		'Search:AddlSearchScope': 'all'}
	for page in range(11):
		values['__EVENTARGUMENT'] = 'pi=' + str(page)
		html = crawl(url, values)
		soup = BeautifulSoup(html)
		# the inner loop no longer shadows the outer index (both were `i`)
		for article in soup.findAll('div', {'class': 'user_article'}):
			block = str(article)
			# `\.aspx` -- the original `.*.aspx` left the dot unescaped
			links = re.findall(r'<code><a href="(http://blog.csdn.net/.*\.aspx)">阅读全文', block)
			titles = re.findall(r'aspx">(.*?)<cite', block)
			if not links or not titles:
				continue	# skip malformed entries instead of IndexError
			smail(titles[0], parse_content(links[0]))
			time.sleep(2)	# throttle requests to the blog server

			
# Feed URL for the rss() entry point (currently unused -- see below).
test_url ='http://blog.csdn.net/kauu/Rss.aspx' 


if __name__=='__main__':
	# Open the crawl DB; note it is unused while the rss() call is disabled.
	db = DB()
	#rss(uu,db)
	parsePage()