#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
import re
from urllib import urlretrieve
from xml2dict import *
from pprint import pprint
from urlparse import urlsplit, urlunsplit
from os.path import *
from os import mkdir
import logging

# Append-mode file logging for the whole scraper; DEBUG level so per-item
# fetch/download details end up in browser.log rather than on stdout.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(process)d %(threadName)s %(message)s', filename='browser.log', filemode='a')
log=logging.getLogger('BROWSER')

class Browser:
    """Fetch an RSS feed, extract article bodies with a site-specific regex,
    and mirror the referenced media files into a local directory tree.

    NOTE(review): the file targets Python 2 (urllib2/urlparse imports at the
    top); the fixes below stay within Python 2 idioms.
    """

    def __init__(self, root_dir, rss_url, regexstr, sitename=''):
        """
        root_dir -- local directory under which per-site folders are created
        rss_url  -- feed URL; fetched immediately when truthy
        regexstr -- regex whose first group captures the article body HTML
        sitename -- folder name for this site; defaults to the feed's netloc
        """
        self.sitename = sitename
        self.xmlparser = XML2Dict()
        self.rss_url = rss_url
        self.root = root_dir
        self.media_dir = 'media'
        # src="..." attribute extractor used to find embedded media URLs.
        self.src_regex = re.compile('src="(.*?)"')
        self.regex = re.compile(regexstr)
        if self.rss_url:
            self.input = urllib2.urlopen(rss_url).read()
            self.rss_xml = self.xmlparser.fromstring(self.input)
        self.check_dirs()

    def check_dirs(self):
        """Create <root>/<sitename>/media, deriving sitename from the feed
        URL's host when it was not given explicitly."""
        self.site_url = urlsplit(self.rss_url)
        if not self.sitename:
            self.sitename = self.site_url.netloc
        self.sitedir = join(self.root, self.sitename)
        self.site_mediadir = join(self.root, self.sitename, self.media_dir)
        for d in (self.root, self.sitedir, self.site_mediadir):
            if not isdir(d):
                mkdir(d)

    def parse_rss(self, limit=10):
        """Fetch up to `limit` feed items; return a list of dicts with keys
        'url', 'title' and 'content' (first regex match on the page).

        Fixes vs. the original: the counter was compared *after* fetching,
        so limit=10 downloaded 11 pages; the whole feed was also pprint-ed
        to stdout (now a debug log entry).
        """
        out_dict = []
        items = self.rss_xml['rss']['channel']['item']
        log.debug('feed items: %r', items)
        for x, i in enumerate(items):
            if x >= limit:
                break
            # Newlines are stripped so the single-line extraction regexes
            # can span what were multi-line HTML fragments.
            fullpage = urllib2.urlopen(i['link']['value']).read().replace("\n", '').replace("\r", '')
            content = self.regex.findall(fullpage)
            if content:
                out_dict.append({'url': i['link']['value'],
                                 'title': i['title']['value'],
                                 'content': content[0]})
        return out_dict

    def get_srcs(self, content):
        """Return every src="..." attribute value found in `content`."""
        return self.src_regex.findall(content)

    def fix_srcs(self, node):
        """Download each media URL referenced by node['content'] and rewrite
        the HTML so src attributes point at the local copies; URLs that fail
        to download are replaced with the article URL itself."""
        content = node['content']
        for src in self.get_srcs(content):
            # The original passed node['url'] as the second positional arg,
            # which landed in download_src's unused `resize` parameter.
            local = self.download_src(src)
            content = content.replace(src, local if local else node['url'])
        return content

    def download_src(self, src, resize=None, resample=None):
        """Download one media URL into the site's media tree; return the
        relative local path, or '' on failure/skip.

        resize/resample are reserved for future image processing and are
        currently ignored.
        """
        mediau = urlsplit(src)
        # Resolve site-relative URLs against the feed's scheme and host.
        if not mediau.netloc:
            src = urlunsplit((self.site_url[0], self.site_url[1],
                              mediau[2], mediau[3], mediau[4]))
        paths = split(mediau[2] + mediau[3])
        sub_dir = join(self.media_dir, paths[0].replace('/', '_')[1:])
        target_dir = join(self.root, self.sitename, sub_dir)  # renamed: `dir` shadowed the builtin
        if not isdir(target_dir):
            mkdir(target_dir)
        filepath = join(target_dir, paths[1])
        local = join(sub_dir, paths[1])
        if isfile(filepath):
            return local  # already mirrored
        log.info('src: %s, filepath:%s' % (src, filepath))
        # FIXME: YouTube blocking workaround; convert to a general-purpose
        # URL skip list.
        if 'youtube' in src:
            return ''
        try:
            got = urlretrieve(src, filepath)
        except Exception:
            # The original used a bare except and then read the unbound
            # result variable, raising NameError after every failure.
            log.exception('download failed')
            return ''
        headers = got[1]
        if 'Content-Length' in headers and int(headers['Content-Length']) > 0:
            return local
        return ''

    def test_2_file(self, filename='index.html'):
        """Render up to 100 parsed items into <sitedir>/<filename>."""
        out_dict = self.parse_rss(100)
        out = open(join(self.sitedir, filename), 'w')
        try:
            for n in out_dict:
                out.write("<h3>%s</h3>\n\n%s<hr>" % (n['title'], self.fix_srcs(n)))
        finally:
            # Close even if a download/regex step blows up mid-loop.
            out.close()

    def test2echo(self):
        """Debug helper: print the srcs and fixed content of the first item."""
        out_dict = self.parse_rss(1)
        print(self.get_srcs(out_dict[0]['content']))
        print(self.fix_srcs(out_dict[0]))


if __name__ == "__main__":
    # Scraper configuration: destination directory, feed URL, and the
    # site-specific regex whose group 1 captures the article body.
    root_dir='/home/sleytr/ofreader_results'
    # NOTE(review): rss_url is assigned twice (only nethaber survives) and
    # the run below actually uses grss_url/gregexstr — the others are
    # leftover experiments kept for reference.
    rss_url = 'http://www.ozgurlukicin.com/rss/haber/'
    rss_url = 'http://www.nethaber.com/xml/rss.aspx?catId=0'
    grss_url = 'http://feeds.gawker.com/gizmodo/excerpts.xml'
    #rss_url='http://www.fazlamesai.net/backend.php'
    #rss_url='http://www.pclabs.gen.tr/feed/'
    regexstr = '<div id="content">(.*?)<div style="clear: both;">'#ozgurlukicin
    regexstr = '<span id="ctl00_ctl00_cphContent_cphContent_lblDate">(.*?)<div style="float: right;'#nethaber.com
    gregexstr = '<div class="entry">(.*?)<div id="related">'#gizmodo
    #b=Browser(root_dir, rss_url, regexstr)
    # Fetch Gizmodo's feed and write all items (with localized media) to
    # <root_dir>/gizmodo.com/index.html.
    b=Browser(root_dir, grss_url, gregexstr, sitename='gizmodo.com')
    b.test_2_file()
    #b.test2echo()
