# -*- coding: utf-8 -*-

import logging
import math
import random
import re

from datetime import datetime, timedelta

from google.appengine.ext import webapp
from google.appengine.ext import db

from google.appengine.api import urlfetch
from google.appengine.api.urlfetch_errors import DownloadError
from google.appengine.api import taskqueue
from google.appengine.api import images
from BeautifulSoup import BeautifulSoup, Comment


from utilities import *
import feedparser
from config import Config


class FeedManager(webapp.RequestHandler):
    """Polls the configured RSS/Atom feeds and records the hostname of
    every linked article as a Domain entity in the datastore.
    """

    def get(self):
        """Fetch and parse every feed that is due for retrieval.

        A feed is due when it was last retrieved more than
        Config.feed_retrieval_delay_minutes() minutes ago.  On a 200
        response the feed's lastRetrieved timestamp is updated *before*
        parsing, so a parser failure does not trigger an immediate
        re-fetch on the next run.
        """
        feed_retrieval_deadline = datetime.now() - timedelta(
            minutes=Config.feed_retrieval_delay_minutes())
        for feed in Config.get_feeds():
            if feed.lastRetrieved > feed_retrieval_deadline:
                logging.debug('Skipping feed %s.' % feed.url)
                continue
            logging.debug('Getting feed %s.' % feed.url)
            try:
                result = urlfetch.fetch(feed.url)
            except DownloadError as error:
                # str(error) replaces the deprecated error.message attribute.
                logging.warning('Could not get feed %s - %s' % (feed.url, error))
                continue
            if result.status_code == 200:
                feed.lastRetrieved = datetime.now()
                feed.put()
                self.__parse_feed(result.content)
            elif result.status_code == 500:
                logging.error('Feed %s returned with status code 500.' % feed.url)
            elif result.status_code == 404:
                logging.error('Error 404: Nothing found at %s.' % feed.url)
            else:
                # Other status codes (3xx, 403, 503, ...) were previously
                # swallowed silently; at least leave a trace in the log.
                logging.warning('Feed %s returned unexpected status code %d.'
                                % (feed.url, result.status_code))

    def __parse_feed(self, feed_content):
        """Parse raw feed XML/HTML and prepare each entry for storage."""
        feed = feedparser.parse(feed_content)
        for entry in feed.entries:
            self.__prepare_article(entry)
            logging.debug('Prepare article %s.' % entry.link)

    def __prepare_article(self, entry):
        """Derive the article URL and publication date from a feed entry
        and hand them to __store_article.
        """
        url = entry.link
        # Fall back to "now" when the feed carries no usable
        # updated timestamp.  ('in' replaces the deprecated has_key().)
        if 'updated_parsed' in entry:
            date = datetime(*entry.updated_parsed[:6])
        else:
            date = datetime.now()
        self.__store_article(url, date)

    def __store_article(self, url, date):
        """Store the article's hostname as a Domain entity.

        Returns the existing Domain entity when the hostname has been
        seen before; otherwise creates, saves and returns a new one with
        pubDate set to the article's publication date.
        """
        # NOTE(review): urlparse is not imported in this file; it is
        # presumably re-exported by `from utilities import *` -- confirm.
        parsed_url = urlparse(url)
        entry_domain = parsed_url.hostname
        # Normalize away a leading "www." so both host forms map to the
        # same Domain entity.
        if entry_domain.startswith('www.'):
            entry_domain = entry_domain[4:]

        domains_query = Domain.all()
        domains_query.filter('domain = ', entry_domain)
        domains = domains_query.fetch(1)

        if domains:
            a_domain = domains[0]
        else:
            a_domain = Domain(domain=entry_domain, pubDate=date)
            a_domain.put()
            # Previously mislabelled "Adding new article" -- this code
            # records a domain, not an article.
            logging.info('Adding new domain "%s"' % entry_domain)

        return a_domain