#!/usr/bin/env python
# -*- coding: UTF-8 -*-

from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.api import memcache
from google.appengine.api import urlfetch

from google.appengine.ext import db
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app

try:
  from xml.etree import ElementTree
except ImportError:
  from elementtree import ElementTree
from xml.dom import minidom
import logging
import time
import datetime
import os
import oauth
import simplejson
import urllib
import urllib2
import random
import sys
"""
    XML Resource
    Twitter
    RSS Feed
"""

class ScrapeData(db.Model):
    """Empty datastore model: registers the kind, but defines no properties yet."""
    pass
class RssToJson():
    """Fetch an RSS feed and convert its channel/items into a dict / JSON.

    The constructor fetches and parses eagerly; the result is stored on
    ``self.dom`` as ``{"title": <channel title>, "item": [{tag: text, ...}]}``
    with all text values UTF-8 encoded.
    """
    def __init__(self, url):
        self.url = url
        self.Get()
        self.Parse()

    def Get(self):
        # Raw urlfetch response object is kept on self.fetch for Parse().
        self.fetch = urlfetch.fetch(self.url)

    def Parse(self):
        """Parse self.fetch.content (RSS XML) into self.dom."""
        dom = {}
        _dom = minidom.parseString(self.fetch.content)
        # NOTE(review): assumes the feed always carries at least one <title>.
        dom["title"] = _dom.getElementsByTagName('title')[0].firstChild.data.encode("utf-8")
        dom["item"] = []
        for item in _dom.getElementsByTagName('item'):
            nodes = {}
            for ch in item.childNodes:
                nodeName = ch.nodeName.encode("utf-8")
                # `== 1`, not `is 1`: identity comparison of ints is a
                # CPython implementation detail and a latent bug.
                if len(item.getElementsByTagName(nodeName)) == 1:
                    firstChild = item.getElementsByTagName(nodeName)[0].firstChild
                    # Bug fix: an element with no text previously reused the
                    # previous element's text (NameError on the very first
                    # one); empty elements now map to "".
                    if firstChild is not None:
                        nodes[nodeName] = firstChild.data.encode("utf-8")
                    else:
                        nodes[nodeName] = ""
            dom["item"].append(nodes)
        self.dom = dom

    def ToJson(self):
        """Return self.dom serialized as a JSON string."""
        # Fall back to the stdlib json module when simplejson is absent.
        try:
            import simplejson
        except ImportError:
            import json as simplejson
        return simplejson.dumps(self.dom)
       
class YahooJLP():
    """Thin client for the Yahoo! Japan morphological analysis (MA) API."""
    def __init__(self, appid=None):
        # Base endpoint of the MAService "parse" API.
        self.url = 'http://jlp.yahooapis.jp/MAService/V1/parse'
        self.appid = appid

    def Get(self, sentence=None, result=None):
        """Call the API for *sentence*; return the raw urlfetch response."""
        if not result:
            result = 'ma'
        # NOTE(review): sentence is interpolated unescaped; urllib quoting
        # would be safer but is left out to preserve existing behavior.
        url = '%s?appid=%s&sentence=%s&results=%s' % (self.url, self.appid, sentence, result)
        fetch = urlfetch.fetch(url)
        return fetch

    def ParseTo(self, str):
        """Parse the API's XML response (*str*) into a plain dict.

        Parameter name shadows the builtin but is kept for call
        compatibility with existing callers.
        """
        dom = {}
        _dom = minidom.parseString(str)
        dom['total_count'] = _dom.getElementsByTagName('total_count')[0].firstChild.data.encode("utf-8")
        dom["filtered_count"] = _dom.getElementsByTagName('filtered_count')[0].firstChild.data.encode("utf-8")
        dom["word"] = []  # one dict per <word> element
        for word in _dom.getElementsByTagName('word'):
            nodes = {}
            for ch in word.childNodes:
                nodeName = ch.nodeName.encode("utf-8")
                matches = word.getElementsByTagName(nodeName)
                # Bug fix: guard the missing/empty cases. Previously a tag
                # with no text reused the previous tag's text (NameError on
                # the first iteration), and non-element children ('#text')
                # raised IndexError on the unchecked [0].
                if matches and matches[0].firstChild is not None:
                    nodes[nodeName] = matches[0].firstChild.data.encode("utf-8")
                else:
                    nodes[nodeName] = ""
            dom["word"].append(nodes)
        return dom

    def ToJson(self, dect):
        """Serialize *dect* to a JSON string."""
        # Fall back to the stdlib json module when simplejson is absent.
        try:
            import simplejson
        except ImportError:
            import json as simplejson
        return simplejson.dumps(dect)

    def GetJson(self, word=None):
        """Fetch, parse and JSON-encode the analysis of *word*.

        Returns a JSON error object when *word* is None.
        """
        if word is not None:
            w = self.Get(word)
            dic = self.ParseTo(w.content)
            return self.ToJson(dic)
        else:
            return self.ToJson({'status':'error', 'error_code':'500'})
        
class mixiGraphScraperXML(webapp.RequestHandler):
    """GET /testword: JSON morphological analysis of the #1 Yahoo! search word.

    Fetches the Yahoo! search-ranking RSS feed, runs the top word through
    the Yahoo JLP MA service, and serves the merged result as JSON.
    """
    def __init__(self):
        # NOTE(review): old webapp instantiates handlers with no arguments
        # and wires request/response via initialize(), so this override is
        # safe; self.xml appears unused by get() — candidate for removal.
        self.xml = {
            "resource":''
        }

    def get(self):
        rss = RssToJson('http://searchranking.yahoo.co.jp/rss/word5-rss.xml')
        feed = simplejson.loads(rss.ToJson())
        date = feed["item"][0]["pubDate"]
        title = feed["item"][0]["title"]
        # NOTE(review): hard-coded API credential; should live in config.
        ydk = YahooJLP('WjIojuyxg65IN8lzY5yvacimEULhZHI__oSaLThkKoEcuu13jRogqTxF1vzJ.upC0X8-')
        result = simplejson.loads(ydk.GetJson(title))
        result.update({"pubDate":date, "title":title})
        self.response.headers['Content-Type'] = 'text/javascript; charset=utf-8;'
        # Bug fix: the dict was previously written via its Python repr
        # (single quotes), which is not valid JSON/JS; serialize it properly.
        self.response.out.write(simplejson.dumps(result))
            
class mixiGraphScraperXML2(webapp.RequestHandler):
    """Placeholder handler that serves a static marker string."""
    def get(self):
        # Static body used while the real endpoint is developed.
        marker = 'keyword02'
        self.response.out.write(marker)

# Route table: only /testword is wired up (mixiGraphScraperXML2 is unrouted).
_ROUTES = [
    ('/testword', mixiGraphScraperXML),
]

# Module-level WSGI application, as expected by the GAE runtime.
application = webapp.WSGIApplication(_ROUTES, debug=True)


def main():
    """Hand the application to App Engine's CGI/WSGI adapter."""
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
