#!/usr/bin/env python
# -*- coding:UTF-8 -*-

import re
import requests
import urllib2
import cookielib
from io import StringIO, BytesIO
from lxml import etree
from scrapy.selector import Selector

import sys
# Python 2 hack: site.py deletes sys.setdefaultencoding at startup, so reload
# the module to restore it, then make implicit str<->unicode coercions use
# UTF-8 instead of ASCII (avoids UnicodeDecodeError on Chinese feed content).
reload(sys)
sys.setdefaultencoding("utf-8")

class UrlReader(object):
    """Fetch a URL via urllib2 with a shared cookie jar and a
    desktop-Chrome User-Agent header.

    With no url, read() returns the empty string instead of hitting
    the network.
    """
    _headers={'User-Agent' : 
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}
    # One cookie jar / opener shared by every instance, so cookies
    # persist across requests.
    _cookie= cookielib.CookieJar()
    _handler=urllib2.HTTPCookieProcessor(_cookie) 
    _opener= urllib2.build_opener(_handler)
    
    def __init__(self, url=None):
        if not url:
            # No URL: hold an empty in-memory "response" so read() is safe.
            self.response=StringIO("")
            # Bug fix: previously execution fell through and crashed on
            # urllib2.Request(None, ...).
            return
            
        self.req=urllib2.Request(url, headers=UrlReader._headers)
        self.response= self._opener.open(self.req)
        
    def read(self):
        # Return the raw (undecoded) response body.
        return self.response.read()
        
def urlread(url):
    """Fetch *url* with a desktop-Chrome User-Agent and return the raw
    response body as bytes (``response.content``, not the decoded text)."""
    browser_headers = {'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}
    response = requests.get(url, headers=browser_headers)
    # Force utf-8 for any .text decoding, though we hand back the raw bytes.
    response.encoding = "utf8"
    return response.content

class Parser:
    """Parse an RSS/Atom feed into Item objects.

    ``feed`` is a (url, name) pair: the feed URL and a human-readable
    source name attached to every produced Item.
    """

    def __init__(self, feed):
        self.url=feed[0]
        self.name=feed[1]
        xml=urlread(self.url)
        
        # NOTE(review, translated from the author's Chinese comments): the
        # author found etree's XMLParser/HTMLParser problematic with
        # fromstring on this input -- fromstring only accepts str, not
        # unicode -- so the raw bytes are parsed through BytesIO instead,
        # which works reliably here.
        self.parser=parser= etree.XMLParser()
        self.root=etree.parse(BytesIO(xml), parser)
        
        print u'create process -> %s' % self.url

    def callback(self,method,obj,tag):
        # Tiny xpath dispatcher: "FIND_ALL" returns every <tag> element in
        # the whole document; "FIND" returns the text content of obj's
        # direct <tag> child. Any other method silently returns None.
        if method=="FIND_ALL":
            return obj.xpath("//"+tag)
        elif method=="FIND":
            return obj.xpath(tag+"/text()")

    def getfields(self,tag="item"):
        # All <item> elements (one per feed entry).
        return self.callback(
                method="FIND_ALL",
                obj=self.root,
                tag=tag)
    
    def gettitle(self,obj,tag="title"):
        # Text of the entry's <title> child (list of strings).
        return self.callback(
                method="FIND",
                obj=obj,
                tag=tag)

    def getlink(self,obj,tag="link"):
        # RSS puts the URL in <link>'s text, Atom in the href attribute --
        # this xpath union accepts either. `tag` is unused but kept for a
        # signature parallel to the other getters.
        return obj.xpath('link/text() | link/@href')
        
    def getdate(self,obj,tag="pubDate"):
        # Publication date text of the entry.
        return self.callback("FIND",obj,tag)

    def getdescription(self,obj,tag="description"):
        # Description/summary text of the entry.
        return self.callback("FIND",obj,tag)
        
    def run(self, url=None):
        """Yield one Item per feed entry. ``url`` is unused (kept for
        backward compatibility with older callers)."""
        print u'>>> Parsing %s' % self.url
        for item in self.getfields():
            # Each getter returns a list; join collapses it to one string
            # (empty string when the element is absent).
            title=''.join(self.gettitle(item))
            link=''.join(self.getlink(item))
            date=''.join(self.getdate(item))
            description=''.join(self.getdescription(item))
            name=self.name
            newitem=Item(title, link, date, description,name)
            
            yield newitem
            
class Item:
    """A single feed entry: title, link, pubDate, cleaned description,
    and the source feed's name."""

    def __init__(self, title, link, date, description,name):
        self.title = title.strip()
        self.link= link.strip()
        self.pubDate= date.strip()
        # Description is cleaned of markup/newlines and truncated.
        self.description= self.filter(description).strip()
        self.name=name
        
    def filter(self,description):
        """Strip HTML tags, CR/LF and &nbsp; entities from *description*,
        then truncate to 240 characters with a trailing ellipsis.

        (Name kept for backward compatibility even though it shadows the
        builtin ``filter``.)
        """
        # Raw strings for the regexes; one char-class pass replaces the
        # previous separate \r and \n substitutions.
        description=re.sub(r"<.*?>",'',description)
        description=re.sub(r"[\r\n]",'',description)
        # Plain literal replacement -- no regex needed.
        description=description.replace("&nbsp;"," ")
        if len(description)>240:
            description=description[:240]+'...'
        return description

    def __str__(self):
        return u'\n'.join([u'>>> '+self.title,
                           self.link,
                           self.description,
                           u'<'+self.pubDate+u'>\n'
                           ])
        
    __repr__=__str__
    

def test():
    # parser=Parser(r"http://zhihurss.miantiao.me/dailyrss")
    # parser=Parser(r"http://www.ruanyifeng.com/blog/atom.xml")
    parser=Parser(r"http://jandan.net/feed")
    for item in parser.run():
        print str(item)
        
        # pass
        
if __name__=="__main__":
    test()
