# -*- coding: utf-8 -*-
import logging
import re
from xml.dom import minidom
from HTMLParser import HTMLParser

from google.appengine.api import urlfetch

from micolog_plugin import *
from model import *


class pick_rss(Plugin):
    """Micolog plugin that crawls configured RSS/Atom feeds and
    republishes their items as blog ``Entry`` rows.

    Admin UI: ``get``/``post`` manage the ``FeedList`` table; the
    crawler itself is exposed at the 'pick' URL so it can be triggered
    e.g. from cron.
    """

    def __init__(self):
        Plugin.__init__(self, __file__)
        self.author = "Fred"
        self.authoruri = "mailto:ad@fengsage.cn"
        self.uri = "http://fengsageblog.appspot.com"
        self.description = "RSS采集"
        self.name = "PickRss"
        self.version = "0.1"
        # Map the 'pick' URL to the crawler so it can run outside the admin UI.
        self.register_urlmap('pick', self.getFeed)

    def get(self, page):
        """Render the feed list, or delete one feed.

        Without a 'delid' parameter: render pick_rss.html with every
        FeedList row.  With 'delid': delete the feed whose name matches
        and return a confirmation snippet (returns None when no feed
        matches, preserving the original behavior).
        """
        if page.param("delid") == '':
            querys = FeedList().all()
            return self.render_content("pick_rss.html", {'list': querys})
        else:
            # Delete the single feed whose name equals 'delid'.
            querys = FeedList().all().filter('name =', page.param("delid")).fetch(1)
            for query in querys:
                query.delete()
                return "Delete it successfully! <a href='?'>Click here BACK</a>"

    def post(self, page):
        """Create a FeedList row from the submitted form, then show the list."""
        query = FeedList()
        query.name = page.param("name")
        query.feedurl = page.param("feedurl")
        query.abconf = page.param("abconf")
        # HTML checkboxes post 'on' when ticked, nothing otherwise.
        query.allow_comment = (page.param("allowComment") == 'on')
        query.autoUrl = (page.param("autoUrl") == 'on')
        if page.param("repReg"):
            query.repReg = page.param("repReg")
        query.put()
        return self.get(page)

    def getFeed(self, page=None, *arg1, **arg2):
        """Crawl every configured feed and store new items as Entry rows.

        Handles RSS (an <rss version="..."> element is present) and, as
        the fallback labelled '1.0' here, Atom-style documents
        (entry/title/link/content).  ``FeedList.latest`` remembers the
        link of the newest item seen so items are never imported twice.
        """
        # Pretend to be a desktop browser; some feed hosts reject unknown
        # user agents.  Loop-invariant, so built once.
        headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; zh-CN; rv:1.9.1.9) Gecko/20100315 Firefox/3.5.9'}

        for detal in FeedList().all():
            url = str(detal.feedurl)
            # BUG FIX: the browser headers were built but never passed to
            # urlfetch.fetch(), defeating the user-agent spoofing.
            result = urlfetch.fetch(url, headers=headers)
            if result.status_code != 200:
                continue

            rss_source = result.content
            # Re-encode gb2312 feeds as UTF-8 so minidom can parse them.
            # (Moved after the status check so error bodies are not decoded.)
            if 'encoding="gb2312"' in rss_source:
                rss_source = rss_source.decode('gb2312').encode('UTF-8')
                rss_source = rss_source.replace('encoding="gb2312"', 'encoding="utf-8"')

            file_xml = minidom.parseString(rss_source)

            # Detect the feed flavour from <rss version="...">; with no
            # <rss> element the document is treated as Atom-like ('1.0').
            rssver = '1.0'
            for rssNO in file_xml.getElementsByTagName('rss'):
                rssver = rssNO.getAttribute('version')
            if rssver == '1.0':
                artList, artTitle, artLink, artText = 'entry', 'title', 'link', 'content'
            else:
                artList, artTitle, artLink, artText = 'item', 'title', 'link', 'description'

            items = file_xml.getElementsByTagName(artList)

            flag = ''
            latestId = detal.latest
            ifFirst = 0
            # Items come newest-first; stop as soon as we reach the link
            # recorded by the previous run.
            for item in items:
                entry = Entry()
                entry.title = item.getElementsByTagName(artTitle)[0].firstChild.data
                entry.author_name = 'robot'

                # Optionally build an English slug via Google Translate.
                if detal.autoUrl:
                    translate_url = 'http://translate.google.com/translate_a/t?client=t&text=%s&hl=en&sl=zh-CN&tl=en&pc=0' % entry.title.replace(' ', '_').encode("utf-8")
                    translate_result = urlfetch.fetch(translate_url, None, urlfetch.GET, headers)
                    if translate_result.status_code == 200:
                        # Crude JSON scraping: keep the first translated
                        # sentence and normalize it into a slug.
                        translate_content = translate_result.content.split(',')[0] \
                            .replace('{"sentences":[{"trans":', '') \
                            .replace('\"', '') \
                            .replace('-', '') \
                            .replace(' ', '_') \
                            .replace(' ', '_')
                        logging.info("*********" + translate_content)
                        entry.slug = translate_content

                entry.allow_comment = bool(detal.allow_comment)

                # The item's link doubles as its unique id ('flag').
                if rssver == '1.0':
                    flag = item.getElementsByTagName(artLink)[0].getAttribute('href')
                else:
                    flag = item.getElementsByTagName(artLink)[0].firstChild.data

                # Persist the newest link so the next run can stop there.
                if latestId == 'last':
                    # First crawl of this feed ('last' is the schema default).
                    detal.latest = flag
                    latestId = flag
                    ifFirst = 1
                    db.put(detal)
                else:
                    if flag == latestId:
                        # Everything from here down was imported last time.
                        break
                    if ifFirst == 0:
                        detal.latest = flag
                        db.put(detal)
                        ifFirst = 1
                    # BUG FIX: a redundant unconditional db.put(detal) was
                    # removed here; the entity is saved above when it changes.

                artContent = item.getElementsByTagName(artText)[0].firstChild.data

                # abconf '1': apply the user's comma-separated
                # 'pattern:replacement' pairs as regex substitutions.
                if detal.abconf == '1':
                    if detal.repReg:
                        for txt in detal.repReg.split(','):
                            rep = txt.split(':')
                            artContent = re.sub(rep[0], rep[1], artContent)
                # abconf '2' or '4': strip <img> tags.
                if detal.abconf in ('2', '4'):
                    artContent = re.sub("<img(.*?>)", "", artContent)
                # abconf '3' or '4': strip <a>/</a> tags (keeping the text).
                if detal.abconf in ('3', '4'):
                    artContent = re.sub("<a([^>]*?)>", "", artContent)
                    artContent = re.sub("</a>", "", artContent)

                # Keep line breaks visible in the rendered HTML.
                artContent = artContent.replace('\n', "<br />")
                entry.content = artContent
                entry.save(True)

class FeedList(db.Model):
    """Datastore model: one RSS/Atom feed source configured for pick_rss."""
    # Feed display name; also the delete key ('delid') used by pick_rss.get().
    name = db.StringProperty(multiline=False,default='Fred')
    # URL the crawler fetches in pick_rss.getFeed().
    feedurl = db.StringProperty(multiline=False,default='http://hi.baidu.com/429263181/rss')
    # Link of the newest item imported so far; the default 'last' marks a
    # feed that has never been crawled.
    latest = db.StringProperty(multiline=False,default='last')
    # Content-filter mode consumed by getFeed(): '0' none, '1' regex
    # replacements from repReg, '2' strip <img>, '3' strip <a>, '4' both.
    abconf = db.StringProperty(multiline=False,default='0')
    # Comma-separated 'pattern:replacement' pairs, applied when abconf == '1'.
    repReg = db.StringProperty(multiline=False)
#    synTime = db.BooleanProperty(default=True)
    # When True, build an English slug for each item via Google Translate.
    autoUrl = db.BooleanProperty(default=False)
    allow_comment = db.BooleanProperty(default=True) #allow comment