#!/usr/bin/env python
# -*- coding:UTF-8 -*-

import re
import requests
import urllib2
import cookielib
import urllib
from io import StringIO
from lxml import etree
from scrapy.selector import Selector

import sys
reload(sys)
sys.setdefaultencoding("utf-8")

class UrlReader(object):
    """Fetch a URL via urllib2 with a shared, cookie-aware opener.

    The User-Agent header, cookie jar and opener are class-level, so every
    UrlReader instance shares one cookie session.
    """
    _headers = {'User-Agent' :
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}
    _cookie = cookielib.CookieJar()
    _handler = urllib2.HTTPCookieProcessor(_cookie)
    _opener = urllib2.build_opener(_handler)

    def __init__(self, url=None):
        # BUG FIX: the original fell through after the empty-url branch and
        # still called urllib2.Request(None). Return early instead.
        # (io.StringIO requires unicode in Python 2, hence u"".)
        if not url:
            self.response = StringIO(u"")
            return
        self.req = urllib2.Request(url, headers=self._headers)
        self.response = self._opener.open(self.req)

    def read(self):
        """Return the raw response body (bytes in Python 2)."""
        # Direct call; the original getattr(self.response, "read")() added
        # nothing over a plain attribute access.
        return self.response.read()
        
def urlread(url):
    """Download *url* with requests, force utf8 decoding, return the body as str."""
    import requests
    headers = {'User-Agent' :
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}
    resp = requests.get(url, headers=headers)
    resp.encoding = "utf8"
    # str() coerces the unicode body to a py2 str (works because of the
    # setdefaultencoding("utf-8") hack at the top of the file).
    return str(resp.text)

class Parser:
    """Parse the RSS feed at *url* into Item objects.

    The feed is downloaded with urlread() and parsed with lxml.etree.
    """

    def __init__(self, url=''):
        xml = urlread(url)
        # etree.fromstring() wants a py2 str here, not unicode — urlread()
        # performs the str() coercion for that reason.
        self.root = etree.fromstring(xml)
        sys.stdout.write(">>")  # progress marker on stdout

    def callback(self, method, obj, tag):
        """Dispatch an xpath query on *obj*.

        "FIND_ALL" returns every <tag> element in the document;
        "FIND" returns the text children of <tag>.
        Any other method falls through and returns None.
        """
        if method == "FIND_ALL":
            return obj.xpath("//" + tag)
        elif method == "FIND":
            return obj.xpath(tag + "/text()")

    def getfields(self, tag="item"):
        """Return every feed entry element (default: <item>)."""
        return self.callback(method="FIND_ALL", obj=self.root, tag=tag)

    def gettitle(self, obj, tag="title"):
        """Return the text of the entry's <title>."""
        return self.callback(method="FIND", obj=obj, tag=tag)

    def getlink(self, obj, tag="link"):
        """Return the entry's link text, falling back to its href attribute."""
        # FIX: the original wrote "True and X or Y"; the leading "True and"
        # was a no-op, so this is the same fallback expression simplified.
        return self.callback("FIND", obj, tag) or obj.xpath(tag + "/@href")

    def getdate(self, obj, tag="pubDate"):
        """Return the text of the entry's <pubDate>."""
        return self.callback("FIND", obj, tag)

    def getdescription(self, obj, tag="description"):
        """Return the text of the entry's <description>."""
        return self.callback("FIND", obj, tag)

    def run(self):
        """Yield one Item per feed entry."""
        for item in self.getfields():
            title = ''.join(self.gettitle(item))
            link = ''.join(self.getlink(item))
            date = ''.join(self.getdate(item))
            description = ''.join(self.getdescription(item))
            yield Item(title, link, date, description)
            
class Item:
    """One feed entry: stripped title, link, pubDate and a cleaned description."""

    def __init__(self, title, link, date, description):
        self.title = title.strip()
        self.link = link.strip()
        self.pubDate = date.strip()
        # BUG FIX: the attribute was misspelled "decription". The corrected
        # name is canonical; the old misspelling is kept as an alias so any
        # existing caller reading .decription still works.
        self.description = self.filter(description).strip()
        self.decription = self.description

    def filter(self, description):
        """Strip HTML tags and newlines, expand &nbsp;, truncate to 140 chars."""
        description = re.sub(r"<.*?>", '', description)  # drop markup (non-greedy)
        # Plain character/entity removal does not need regex.
        description = description.replace("\r", '').replace("\n", '')
        description = description.replace("&nbsp;", " ")
        if len(description) > 140:
            description = description[:140] + '...'
        return description

    def __str__(self):
        return "%s\n%s\n%s\n<%s>\n" % (
                    self.title,
                    self.link,
                    self.description,
                    self.pubDate,
                    )

    __repr__ = __str__
    

def test():
    """Smoke test: fetch a sample feed and print every parsed entry."""
    feed = Parser(r"http://zhihurss.miantiao.me/dailyrss")
    for entry in feed.run():
        # Parenthesized form prints identically under Python 2.
        print(entry)

if __name__ == "__main__":
    test()