# coding:utf-8
from lxml import etree

from scrapy.spiders import Spider
from reedlive_scrapy.items import InformationItem
from reedlive_scrapy.items import TuikuItem
from scrapy.selector import Selector
from reedlive_scrapy.utils.dbmanager import DbManager
from shortid import ShortId
import time
import scrapy
from dateutil import parser
import re
import json


class ReedLiveSpider(Spider):
    # XML namespace pairs used for feed parsing (content:encoded lookups).
    namespaces = [('content', "http://purl.org/rss/1.0/modules/content/")]
    # Spider name used by `scrapy crawl reedlive`.
    name = "reedlive"

    def start_requests(self):
        """Seed the crawl with one request per stored channel.

        Channels with rsstype 0/1 are classic RSS/Atom feeds fetched from
        their own link; rsstype 2 channels go through the tuicool JSON API
        with HTTP basic auth.
        """
        self.dbt = DbManager()
        for channel in self.dbt.getItems():
            kind = channel['rsstype']
            if kind == 0 or kind == 1:
                yield scrapy.Request(
                    channel['link'],
                    meta={
                        'dont_redirect': True,
                        'handle_httpstatus_list': [301, 302]
                    },
                    dont_filter=True,
                    callback=self.getRsstems,
                    errback=self.error_callback)
            elif kind == 2:
                # The tuicool API authenticates every call via this header.
                auth = {
                    'Authorization': 'Basic MTkyLjE2OC4xMjMuMjQ0OnR1aWNvb2w='
                }
                yield scrapy.Request(
                    'http://api.tuicool.com/api/sites/fU3UjyA.json?pn=0&size=30&is_pad=1',
                    meta={
                        'dont_redirect': True,
                        'handle_httpstatus_list': [301, 302]
                    },
                    headers=auth,
                    dont_filter=True,
                    callback=self.getTuikuItem,
                    errback=self.error_callback)

    def getRsstems(self, response):
        channel = self.dbt.getChannelFromUrl(response.url)
        rsstype = self.dbt.getRssType(channel['rssType'])
        rssitemSet = set()
        if rsstype == 0:
            rssitemSet = self.parse_rss(response.body, channel)
        elif rsstype == 1:
            rssitemSet = self.parse_feed(response.body, channel)

        itemCount=0
        for item in rssitemSet:
            if not self.dbt.isItemExist(item['link']):
                itemCount=itemCount+1
                yield item

        if itemCount != 0:
            print "update"
            channel['updateTime'] = long(time.time())
            self.dbt.updateChannelTime(channel)

    def getTuikuItem(self,response):
        print 'gettuikuitem'
        channel = self.dbt.getChannelFromUrl(response.url)
        rssitemSet = self.parse_tuiku(response.body, channel)

        itemCount=0
        syncSet = set()
        for item in rssitemSet:
            if not self.dbt.isIdExist(item['mid']):
                itemCount=itemCount+1
                syncSet.add(item)
                yield item

        if itemCount != 0:
            channel['updateTime'] = long(time.time())
            self.dbt.updateChannelTime(channel)
            for item in syncSet:
                print 'sync success'
                time.sleep(3)
                # for item in items:
                base_url = 'http://api.tuicool.com/api/articles/' + item['mid'] + '.json?need_image_meta=1&type=2'
                print 'start parse' + base_url
                HEADER = {
                    'Authorization': 'Basic MTkyLjE2OC4xMjMuMjQ0OnR1aWNvb2w=',
                    'mid':item['_id']
                }
                yield scrapy.Request(base_url,
                                     meta={
                                         'dont_redirect': True,
                                         'handle_httpstatus_list': [301, 302]
                                     },
                                     headers=HEADER,
                                     dont_filter=True,
                                     callback=self.updateTuikuItem,
                                     errback=self.error_callback
                                     )


    def parse_rss(self, body, channel):
        """Parse an RSS 2.0 document into a set of InformationItem.

        body    -- raw XML bytes of the feed
        channel -- channel record; supplies _id for the produced items
        """
        page = etree.XML(body)
        namespaces = {'content': "http://purl.org/rss/1.0/modules/content/",
                      "wfw": "http://wellformedweb.org/CommentAPI/",
                      "dc": "http://purl.org/dc/elements/1.1/",
                      "atom": "http://www.w3.org/2005/Atom",
                      "sy": "http://purl.org/rss/1.0/modules/syndication/",
                      "slash": "http://purl.org/rss/1.0/modules/slash/"}
        itemsSet = set()

        for bodyetree in page.xpath("channel/item"):
            item = InformationItem()

            mTitle = ""
            mLink = ""
            if bodyetree.find('title') is not None:
                mTitle = bodyetree.find('title').text

            if bodyetree.find("link") is not None:
                mLink = bodyetree.find("link").text

            # BUGFIX: the old code tested for pubDate OR published but then
            # always dereferenced pubDate, raising AttributeError on feeds
            # that only carry <published>. Try pubDate, fall back.
            mpub = ""
            dateNode = bodyetree.find('pubDate')
            if dateNode is None:
                dateNode = bodyetree.find('published')
            if dateNode is not None and dateNode.text:
                mDate_time = parser.parse(dateNode.text)
                mpub = time.mktime(mDate_time.timetuple())

            mDescription = ""
            if bodyetree.find('description') is not None:
                mDescription = bodyetree.find('description').text

            # findall always returns a list, so only the length check is
            # meaningful (the old `is not None` test was always true).
            mContent = ""
            context = bodyetree.findall('content:encoded', namespaces)
            if len(context) > 0:
                mContent = context[0].text
            if bodyetree.find('summary') is not None:
                mContent = bodyetree.find('summary').text

            # Empty elements yield text=None/""; reuse the description then.
            if mContent is not None and len(mContent) == 0 and mDescription is not None and len(mDescription) > 0:
                mContent = mDescription

            dr = re.compile(r'<[^>]+>', re.S)
            if mDescription:
                mSubTitle = dr.sub('', mDescription).strip()
            else:
                # BUGFIX: guard against mContent being None (empty
                # <summary/> with no description) — re.sub rejects None.
                mSubTitle = dr.sub('', mContent or '').strip()

            mSubTitle = Selector(text=mSubTitle).xpath('string(.)').extract_first() or ""
            if len(mSubTitle) > 60:
                mSubTitle = mSubTitle[0:59]

            mTime = time.time()

            item['_id'] = ShortId().generate()
            item['title'] = mTitle
            item['subTitle'] = mSubTitle
            item['link'] = mLink
            # Fall back to "now" when the feed carried no usable date.
            if mpub is None or not isinstance(mpub, float) or mpub == 0:
                item['published'] = long(mTime)
            else:
                item['published'] = long(mpub)
            item['updated'] = long(mTime)
            item['channelId'] = channel['_id']
            item['description'] = mDescription
            item['content'] = mContent
            item['textCount'] = self.GetWordNums(dr.sub('', mContent or '').strip())
            itemsSet.add(item)
        return itemsSet

    def parse_feed(self, body, channel):
        """Parse an Atom <feed> document into a set of InformationItem.

        body    -- raw XML text of the feed
        channel -- channel record; supplies _id for the produced items
        """
        selector = Selector(text=body)
        selector.register_namespace('content', "http://purl.org/rss/1.0/modules/content/")
        selector.register_namespace("wfw", "http://wellformedweb.org/CommentAPI/")
        selector.register_namespace("dc", "http://purl.org/dc/elements/1.1/")
        selector.register_namespace("atom", "http://www.w3.org/2005/Atom")
        selector.register_namespace("sy", "http://purl.org/rss/1.0/modules/syndication/")
        selector.register_namespace("slash", "http://purl.org/rss/1.0/modules/slash/")
        items = selector.xpath("//feed")
        itemsSet = set()
        for it in items.xpath("entry"):
            item = InformationItem()
            mTitle = ""
            if it.xpath('title/text()'):
                mTitle = it.xpath('title/text()').extract_first().encode('utf8')

            # An entry's link may live in the element text or the href attr.
            mLink = ""
            if it.xpath('link'):
                if it.xpath("link/text()"):
                    mLink = it.xpath('link/text()').extract_first().encode('utf8')
                elif it.xpath('link/@href'):
                    mLink = it.xpath('link/@href').extract_first().encode('utf8')

            # pubDate (checked second) wins when both date elements exist.
            mpub = ""
            if it.xpath('published'):
                mDate = it.xpath('published/text()').extract_first()
                mDate_time = parser.parse(mDate)
                mpub = time.mktime(mDate_time.timetuple())

            if it.xpath("pubDate"):
                mDate = it.xpath('pubDate/text()').extract_first()
                mDate_time = parser.parse(mDate)
                mpub = time.mktime(mDate_time.timetuple())

            mDescription = ""
            if it.xpath('description/text()'):
                mDescription = it.xpath('description/text()').extract_first().encode('utf8')

            # summary (checked second) overrides content:encoded.
            mContent = ""
            if it.xpath('content:encoded/text()'):
                mContent = it.xpath('content:encoded/text()').extract_first().encode('utf8')
            if it.xpath('summary/text()'):
                mContent = it.xpath('summary/text()').extract_first().encode('utf8')

            dr = re.compile(r'<[^>]+>', re.S)
            if mDescription:
                mSubTitle = dr.sub('', mDescription).strip()
            else:
                mSubTitle = dr.sub('', mContent).strip()

            if len(mSubTitle) > 60:
                mSubTitle = mSubTitle.decode('utf8')[0:59]

            mTime = time.time()

            item['_id'] = ShortId().generate()
            item['title'] = mTitle
            item['subTitle'] = mSubTitle
            item['link'] = mLink
            # BUGFIX: entries with no published/pubDate left mpub as "" and
            # long("") raised ValueError; fall back to the crawl time, the
            # same behaviour parse_rss already has.
            if not isinstance(mpub, float) or mpub == 0:
                item['published'] = long(mTime)
            else:
                item['published'] = long(mpub)
            item['updated'] = long(mTime)
            item['channelId'] = channel['_id']
            item['description'] = mDescription
            item['content'] = mContent
            item['textCount'] = self.GetWordNums(dr.sub('', mContent).strip())
            itemsSet.add(item)
        return itemsSet

    def parse_tuiku(self, body, channel):
        """Parse a tuicool API listing into a set of TuikuItem.

        BUGFIX: when the API reports failure the old code fell off the end
        and returned None, which crashed the caller's `for item in ...`
        iteration. Always return a set (possibly empty).
        """
        res = json.loads(body)
        itemsSet = set()
        if res['success']:
            for dat in res['articles']:
                item = TuikuItem()
                mDate_time = parser.parse(dat['time'])
                mpub = time.mktime(mDate_time.timetuple())
                mTime = time.time()

                item['_id'] = ShortId().generate()
                item['title'] = dat['title']
                item['published'] = long(mpub)
                item['updated'] = long(mTime)
                item['channelId'] = channel['_id']
                # tuicool's own article id; used later for the detail fetch.
                item['mid'] = dat['id']
                itemsSet.add(item)
        return itemsSet


    def updateTuikuItem(self,response):
        print 'get body'
        body=json.loads(response.body)
        mContent=body['article']['content']
        dr = re.compile(r'<[^>]+>', re.S)
        mSubTitle = dr.sub('', mContent).strip()
        if len(mSubTitle) > 60:
            mSubTitle = mSubTitle[0:59]

        textCount = self.GetWordNums(dr.sub('', mContent).strip())

        self.dbt.updateTuikuItem(response.request.headers['mid'],mContent,mSubTitle,textCount)


    def test_parse(self):
        # Leftover manual-debug hook; not wired to any scrapy callback here.
        print 'test'

    def error_callback(self, failure=None):
        """Errback for failed requests.

        BUGFIX: scrapy invokes errbacks with a twisted Failure argument;
        the old zero-argument signature raised TypeError on every request
        error, hiding the real failure. The None default keeps any direct
        no-argument calls working.
        """
        print("error")

    def GetWordNums(self, text):
        """Count the characters of `text` that are neither whitespace nor
        one of the listed ASCII punctuation marks."""
        skipped = ' \n!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~"'
        return sum(1 for ch in text if ch not in skipped)
