import sys
# Python 2-only hack: force the interpreter's default string encoding to
# UTF-8 so implicit str<->unicode conversions don't raise UnicodeDecodeError.
# reload() is required because site.py removes sys.setdefaultencoding at
# startup; this whole preamble is a no-op target for a future Py3 port.
reload(sys)
sys.setdefaultencoding('utf-8')

import feedparser
import os
import datetime
import requests
import xmltodict
import re

from model import News, TSite, TResource
from dbhelper import DBHelper
from config import (
    RSS_URL,
    TUMBLR_SITES_FILE,
)
import utils

__all__ = ['NewsParser', 'TumblrParser']


class NewsParser:
    """Parse the NHK radio news podcast RSS feed and persist its items."""

    def __init__(self, url=None, db=None):
        self._url = url          # default feed URL; parse() may override it
        # Fall back to a fresh DBHelper when no db handle is supplied.
        self.db = db or DBHelper()
        self.feeds = None        # last feedparser result
        self.items = None        # entries from the last successful fetch
        # [modified, etag] from the last fetch, used for conditional GETs.
        self.marks = ['', '']

    def parse(self, url=None):
        """
            Parse the content of rss of nhk radio, and save the items to database.

            >>> parser = NewsParser()
            >>> parser.parse('http://www.nhk.or.jp/r-news/podcast/nhkradionews.xml')
            True
            >>> parser.parse('')
            False
            >>> parser.parse('www.dddddddddddddddddddddddddd.com')
            True
        """

        _url = url or self._url

        if not _url:
            return False

        try:
            if not self.feeds:
                self.feeds = feedparser.parse(_url)
            else:
                # Refresh: conditional GET so an unchanged feed returns 304.
                self.feeds = feedparser.parse(
                    RSS_URL, modified=self.marks[0], etag=self.marks[1])
        except Exception as e:
            print(e)
            self.items = None
            return False
        else:
            if hasattr(self.feeds, 'status') and self.feeds.status != 200:
                self.items = None
            else:
                # Servers may omit these headers; keep the previous marks
                # instead of raising AttributeError on the feedparser result.
                self.marks[0] = getattr(self.feeds, 'modified', self.marks[0])
                self.marks[1] = getattr(self.feeds, 'etag', self.marks[1])
                self.items = self.feeds.entries

        self.save_news()

        return True

    def save_news(self):
        """Dump the parsed links and insert the news rows into the database."""
        print(self.get_links())
        self.db.inserts(self.get_news())

    def get_items(self):
        """Return the raw feed entries (None before a successful parse)."""
        return self.items

    def get_news(self):
        """Return the current items converted to News model instances."""
        if not self.items:
            return []
        return [self.transfer_item(i) for i in self.items]

    def get_links(self):
        """Return the first link of every item, or [] on any error."""
        if not self.items:
            return []
        try:
            return [i.links[0].href for i in self.items]
        except Exception:
            return []

    def get_titles(self):
        """Return the title of every item, or [] on any error."""
        if not self.items:
            return []
        try:
            return [i.title for i in self.items]
        except Exception:
            return []

    @staticmethod
    def transfer_item(item):
        """Convert one feed entry into a News model; None if malformed."""
        news = News()
        try:
            news.news_id = item.id
            news.title = item.title
            news.file = ''
            news.duration = item.itunes_duration
            news.link = item.links[0].href
            news.size = item.links[0].length
        except Exception:
            news = None
        return news


class TumblrParser(object):
    """Crawl Tumblr sites listed in TUMBLR_SITES_FILE and store media URLs.

    For every configured site the parser pages through the legacy Tumblr
    API (``/api/read``) for photos and videos, extracts the media URLs and
    persists them as TResource rows via DBHelper.
    """

    def __init__(self, db=None):
        # Fall back to a fresh DBHelper when no db handle is supplied.
        self.db = db or DBHelper()
        self.max_count = 50      # stop after this many URLs per site/type
        self.count = 0           # URLs collected for the current site/type
        self.medium_num = 50     # page size requested from the API
        self.site_file = TUMBLR_SITES_FILE
        self.sites = None
        # Placeholders: site name, medium type, page size, start offset.
        self.base_url = "http://{0}.tumblr.com/api/read?type={1}&num={2}&start={3}"

        self._register_regex_match_rules()

    def run(self):
        """Entry point: crawl every configured site."""
        self.get_sites()

    def transfer_item(self, url, site, medium_type):
        """Wrap a media URL into a TResource model instance."""
        resource = TResource()
        resource.site = site
        resource.url = url
        resource.medium_type = medium_type
        return resource

    def save_resources(self, resources):
        """Persist a batch of TResource instances."""
        self.db.inserts(resources)

    def parse_sites(self, filename):
        """Read site names from `filename`.

        Names may be separated by any mix of spaces, tabs and newlines.
        Returns an empty list when the file does not exist.
        """
        if not os.path.exists(filename):
            return []

        with open(filename, "r") as f:
            raw_sites = f.read().strip()

        # Normalize every separator to a comma, then split on it.
        for sep in ("\t", "\r", "\n", " "):
            raw_sites = raw_sites.replace(sep, ",")

        return [site.strip() for site in raw_sites.split(",") if site.strip()]

    def get_sites(self):
        """Crawl photos and videos for every site in the sites file."""
        sites = self.parse_sites(self.site_file)

        for site in sites:
            self.get_resource(site, "photo")
            self.get_resource(site, "video")

    def get_resource(self, site, medium_type):
        """Page through `site`'s API feed collecting up to max_count URLs."""
        self.count = 0
        start = 0

        while self.count < self.max_count:
            media_url = self.base_url.format(site, medium_type,
                                             self.medium_num, start)
            urls = set()
            response = requests.get(media_url)
            if response.status_code == 404:
                print("Site %s does not exist" % site)
                break

            try:
                data = xmltodict.parse(response.content)
                posts = data["tumblr"]["posts"]["post"]
                for post in posts:
                    try:
                        # A photoset carries several photos; collect each.
                        photoset = post["photoset"]["photo"]
                        for photo in photoset:
                            url = self._handle_medium_url(medium_type, photo)
                            if url and ".tumblr.com" in url:
                                print("site: %s - %s\nurl: %s"
                                      % (site, medium_type, url))
                                urls.add(url)
                                self.count += 1
                    except (KeyError, TypeError):
                        # Plain post (no photoset); the largest resolution
                        # is usually in the first element.
                        url = self._handle_medium_url(medium_type, post)
                        if url and ".tumblr.com" in url:
                            print("site: %s - %s\nurl: %s"
                                  % (site, medium_type, url))
                            urls.add(url)
                            self.count += 1

                self.save_resources([
                    self.transfer_item(url, site, medium_type) for url in urls
                ])
                start += self.medium_num
            except KeyError:
                # No more posts under this type: pagination finished.
                break
            except UnicodeDecodeError:
                print("Cannot decode response data from URL %s" % media_url)
                # Advance anyway; retrying the same offset would loop forever.
                start += self.medium_num
                continue

    def video_hd_match(self):
        """Build a matcher that extracts the HD video URL, if present."""
        hd_pattern = re.compile(r'.*"hdUrl":("([^\s,]*)"|false),')

        def match(video_player):
            hd_match = hd_pattern.match(video_player)
            try:
                # group(1) is the literal 'false' when no HD variant exists.
                if hd_match is not None and hd_match.group(1) != 'false':
                    # Strip the JSON-style '\/' escaping from the URL.
                    return hd_match.group(2).replace('\\', '')
            except (AttributeError, IndexError):
                return None

        return match

    def video_default_match(self):
        """Build a matcher that extracts the default (SD) video src URL."""
        default_pattern = re.compile(r'.*src="(\S*)" ', re.DOTALL)

        def match(video_player):
            default_match = default_pattern.match(video_player)
            if default_match is not None:
                try:
                    return default_match.group(1)
                except IndexError:
                    return None

        return match

    def _register_regex_match_rules(self):
        # Rules are tried in order; the first non-None result wins,
        # so the HD matcher must come before the default one.
        self.regex_rules = [self.video_hd_match(), self.video_default_match()]

    def _handle_medium_url(self, medium_type, post):
        """Extract the downloadable URL from a single API post entry.

        Returns None when the post does not carry a usable URL.
        """
        try:
            if medium_type == "photo":
                # The first photo-url entry is the largest resolution.
                return post["photo-url"][0]["#text"]

            if medium_type == "video":
                video_player = post["video-player"][1]["#text"]
                for regex_rule in self.regex_rules:
                    matched_url = regex_rule(video_player)
                    if matched_url is not None:
                        return matched_url
        except (KeyError, IndexError, TypeError):
            # Unexpected post layout; skip this entry (best-effort crawl).
            return None


if __name__ == "__main__":
    # When run as a script, execute the module's doctests
    # (see NewsParser.parse); note they require network access.
    import doctest
    doctest.testmod()
