# This file holds utility methods for search, clustering, and HTML scraping.

import model
from google.appengine.api import urlfetch
from django.utils import simplejson
import urllib
import logging
import re
import sgmllib
import datetime
from StringIO import StringIO
import copy

# For Search Result Acquirement
import myhttplib
from BeautifulSoup import BeautifulSoup
from xml.dom.minidom import getDOMImplementation

# Host of the Carrot2 Document Clustering Server (DCS); presumably the
# production box — switch to the commented-out 127.0.0.1 for local testing.
CLUSTER_SERVER = '125.119.235.161'#'127.0.0.1'

def encode_multipart_formdata(fields):
    """Encode a Carrot2 DCS request as a multipart/form-data body.

    fields: dict whose 'c2stream' entry holds the Carrot2 XML input
    stream; it is sent as a file upload.  Two fixed form fields are always
    included: dcs.default.output=json and dcs.clusters.only=true.

    Returns (content_type, body) ready for an httplib.HTTP instance.
    """
    BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
    CRLF = '\r\n'
    parts = []

    # Fixed form fields: request JSON output containing only the clusters.
    for name, value in (('dcs.default.output', 'json'),
                        ('dcs.clusters.only', 'true')):
        parts.append('--' + BOUNDARY)
        parts.append('Content-Disposition: form-data; name="%s"' % name)
        parts.append('')
        parts.append(value)

    # The document stream is uploaded as a file part named "c2stream".
    parts.append('--' + BOUNDARY)
    parts.append('Content-Disposition: form-data; name="c2stream"; filename="web"')
    parts.append('Content-Type: application/octet-stream')
    parts.append('')
    parts.append(fields['c2stream'])

    parts.append('--' + BOUNDARY + '--')
    parts.append('')  # final CRLF after the closing boundary
    body = CRLF.join(parts)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body


def seal_query_xml(query, documents):
    """Build a Carrot2 <searchresult> XML document via minidom.

    query: the search-query text.
    documents: iterable of dicts with 'Title' and 'Summary' keys; each
    becomes a <document id="N"> element with <title> and <snippet>
    children, numbered from 0.

    Returns the serialized document as UTF-8 encoded XML.
    """
    dom = getDOMImplementation().createDocument(None, "searchresult", None)
    root = dom.documentElement

    query_node = dom.createElement("query")
    query_node.appendChild(dom.createTextNode(query))
    root.appendChild(query_node)

    for doc_id, document in enumerate(documents):
        doc_node = dom.createElement("document")
        doc_node.setAttribute('id', str(doc_id))

        title_node = dom.createElement("title")
        title_node.appendChild(dom.createTextNode(document['Title']))
        doc_node.appendChild(title_node)

        snippet_node = dom.createElement("snippet")
        snippet_node.appendChild(dom.createTextNode(document['Summary']))
        doc_node.appendChild(snippet_node)

        root.appendChild(doc_node)

    return dom.toxml('utf-8')

def seal_query(query, documents):
    """Build the Carrot2 <searchresult> XML stream by string concatenation.

    Same output contract as seal_query_xml, but assembled directly as a
    unicode string: one <document id="N"> per entry, with <title> and
    <snippet> taken from each dict's 'Title' and 'Summary' fields.
    Note: field values are NOT escaped; callers must supply XML-safe text.
    """
    pieces = [u'<?xml version="1.0" encoding="UTF-8"?>',
              u'<searchresult>',
              u'<query>%s</query>' % query]
    for doc_id, document in enumerate(documents):
        pieces.append(u'<document id="%d">' % doc_id)
        pieces.append(u'<title>' + document['Title'] + u'</title>')
        pieces.append(u'<snippet>' + document['Summary'] + u'</snippet>')
        pieces.append(u'</document>')
    pieces.append(u'</searchresult>')
    return u''.join(pieces)

def get_cluster_results(c2stream):
    """Send a Carrot2 XML stream to the clustering server and return the
    parsed JSON response (clusters only)."""
    form_fields = {
        'c2stream': c2stream,
        'dcs.default.output': 'json',
        'dcs.clusters.only': 'true',
    }
    return _cluster_carrot(CLUSTER_SERVER, form_fields)

def _cluster_carrot(server, form_fields):
    """POST form_fields as multipart data to the Carrot2 DCS endpoint
    /rest/processor on *server* and return the decoded JSON reply."""
    content_type, body = encode_multipart_formdata(form_fields)

    conn = myhttplib.HTTP(server)
    conn.putrequest('POST', '/rest/processor')
    conn.putheader('content-type', content_type)
    conn.putheader('content-length', str(len(body)))
    conn.endheaders()
    conn.send(body)
    # Status/headers are read but not checked; a bad reply surfaces as a
    # JSON decode error below.
    errcode, errmsg, headers = conn.getreply()
    raw = conn.file.read()
    return simplejson.load(StringIO(raw))

def get_search_results(appid, query, region ='us', type = 'all', results = 100, start = 0, 
                       format ='any', adult_ok = "", similar_ok = "", language = "", country = "", 
                       site = "", subscription = "", license = ''):
    """Run a Yahoo! WebSearchService V1 query and return its 'ResultSet'.

    All arguments map 1:1 onto Yahoo web-search query parameters
    (appid is the application key, query the search terms).
    """
    # Build the parameter dict explicitly.  The previous locals() snapshot
    # was taken AFTER base_url was assigned, so a bogus 'base_url=...'
    # parameter leaked into every request.
    params = dict(appid=appid, query=query, region=region, type=type,
                  results=results, start=start, format=format,
                  adult_ok=adult_ok, similar_ok=similar_ok,
                  language=language, country=country, site=site,
                  subscription=subscription, license=license)
    base_url = u'http://search.yahooapis.com/WebSearchService/V1/webSearch?'
    result = _query_yahoo(base_url, params)
    return result['ResultSet']

def _query_yahoo(base_url, params):
    """GET base_url + urlencoded *params* via urlfetch and return the
    decoded JSON response.  Mutates *params*: forces output=json and
    percent-quotes the UTF-8 encoded query string.

    NOTE(review): 'query' is quoted here and then urlencoded again below,
    which double-encodes it — preserved as-is, confirm it is intentional.
    """
    params['output'] = 'json'
    params['query'] = urllib.quote(params['query'].encode('utf-8'))
    url = base_url + urllib.urlencode(params)
    raw = urlfetch.fetch(url).content
    return simplejson.load(StringIO(raw))

'''
    Get Google Search Results
'''
def get_google_search_results(q, num = 100, complete = 1, hl = "en", start = 0, sa ='N', 
                              oe = 'UTF-8', ie = 'UTF-8'):
    """Fetch the raw Google web-search HTML page for query *q*.

    The keyword arguments become Google search URL parameters (result
    count, language, pagination offset, encodings).  Returns the raw
    response body from _query_google.
    """
    params = {
        'q': q,
        'num': num,
        'complete': complete,
        'hl': hl,
        'start': start,
        'sa': sa,
        'oe': oe,
        'ie': ie,
    }
    return _query_google(params)

def _query_google(params):
    """Issue a GET to www.google.com/search with *params* as the query
    string and return the raw (undecoded) HTML response body."""
    logging.debug(params['q'])
    query_string = urllib.urlencode(params)
    path = "/search?" + query_string
    logging.debug(path)
    connection = myhttplib.HTTPConnection("www.google.com")
    # A browser-like User-Agent; Google blocks default library agents.
    request_headers = {"User-agent": "Mozilla/5.0"}
    connection.request("GET", path, None, request_headers)
    reply = connection.getresponse()
    html = reply.read()
    connection.close()
    return html

def parser_google_web(data):
    """Scrape a Google web-search result page into a list of result dicts.

    data: raw HTML of a Google results page.
    Returns up to 100 dicts with keys 'Title', 'Summary', 'Url' (the
    display <cite> text) and 'ClickUrl' (the anchor href), all passed
    through urllib.unquote.  Structure depends on Google's markup of the
    era (<li class="g"> items with <a class="l"> title links).
    """
    soup = BeautifulSoup(data)
    # Each organic result is an <li class="g"> element.
    vlis = soup.findAll('li', attrs={"class":"g"})
    
    results = []

    for vli in vlis:
        # First child holds the title link <a class="l">; skip items
        # without one (ads, special blocks).
        soup1 = BeautifulSoup(unicode(str(vli.contents[0]),'utf-8'))
        a =  soup1.findAll('a', attrs={"class":"l"})
        if len(a) == 0:
            continue
        #result = { 'Title': '', 'Summary': '', 'Url': ''}
        soup2 = BeautifulSoup(unicode(str(a[0]),'utf-8'))
        Title = unicode('', 'utf-8')
        ClickUrl = soup2.a['href']
        Url = unicode('', 'utf-8')
        Summary = unicode('', 'utf-8')
        # Title text is the concatenation of the anchor's child nodes
        # (markup like <b>/<em> is stripped further below).
        for content in soup2.a.contents:
            Title += unicode(str(content), 'utf-8')

        # Second child holds the snippet <div>.
        soup3 = BeautifulSoup(unicode(str(vli.contents[1]), 'utf-8'))
        div =  soup3.find('div')
        if div == None:
            continue
        soup4 = BeautifulSoup(unicode(str(div),'utf-8'))
            
        for content in soup4.div.contents:
            soup5 = BeautifulSoup(unicode(str(content),'utf-8'))
            table = soup5.find('table')
            cite = soup5.find('cite')
            br = soup5.find('br')
            span = soup5.find('span')
            soup6 = BeautifulSoup(unicode(str(cite),'utf-8'))
            
            # Collect the display URL from the <cite> element's text.
            for cont in soup6.contents:
                soup7 = BeautifulSoup(unicode(str(cont),'utf-8'))
                cite = soup7.find('cite')
                if cite is not None:
                    soup8 = BeautifulSoup(unicode(str(cite),'utf-8'))
                    for cc in soup8.contents:
                        if cc is not None:
                            Url += unicode(str(cc), 'utf-8')
            # NOTE(review): 'cite' was re-bound by the inner loop above, so
            # this test sees the last inner value rather than soup5's find
            # result — confirm whether that is intended before changing.
            if table == None  and cite == None and br == None and span == None:
                strc = unicode(str(content), 'utf-8')
                # Plain text nodes (not links) contribute to the summary.
                if strc.startswith('<a ') is False:
                    Summary += strc
            
        # Strip Google's highlight/emphasis markup from the fields.
        Title = Title.replace('<b>', '')
        Title = Title.replace('</b>', '')
        Title = Title.replace('<em>', '')
        Title = Title.replace('</em>', '')
        Summary = Summary.replace('<b>', '')
        Summary = Summary.replace('</b>', '')
        Summary = Summary.replace('<em>', '')
        Summary = Summary.replace('</em>', '')
        Url = Url.replace('<cite>', '')
        Url = Url.replace('</cite>', '')
        Url = Url.replace('<b>', '')
        Url = Url.replace('</b>', '')
        #result = { 'Title': Title, 'Summary': Summary, 'Url': Url, 'ClickUrl': ClickUrl}
        result = { 'Title': urllib.unquote(Title), 'Summary': urllib.unquote(Summary), 
                    'Url': urllib.unquote(Url), 'ClickUrl': urllib.unquote(ClickUrl)}
        results.append(result)

    return results[:100]


def _html2txt(s, hint = 'entity', code = 'UTF-8'): #code = 'ISO-8859-1'):
    """Convert the html to raw txt
    - suppress all return
    - <p>, <tr> to return
    - <td> to tab
    Need the foolwing regex:
    p = re.compile('(<p.*?>)|(<tr.*?>)', re.I)
    t = re.compile('<td.*?>', re.I)
    comm = re.compile('<!--.*?-->', re.M)
    tags = re.compile('<.*?>', re.M)
    version 0.0.1 20020930
    """

    p = re.compile('(<p.*?>)|(<tr.*?>)', re.I)
    t = re.compile('<td.*?>', re.I)
    comm = re.compile('<!--.*?-->', re.M)
    tags = re.compile('<.*?>', re.M)    
    
    s = s.replace('\n', '') # remove returns time this compare to split filter join
    s = p.sub('\n', s) # replace p and tr by \n
    s = t.sub('\t', s) # replace td by \t
    s = comm.sub('', s) # remove comments
    s = tags.sub('', s) # remove all remaining tags
    s = re.sub(' +', ' ', s) # remove running spaces this remove the \n and \t
    # handling of entities
    result = s
    pass
    return result

def getInlineImg(s):
    """Extract unique inline image URLs (src="...") from HTML string *s*.

    Only paths ending in gif/jpg/bmp/png are kept and duplicates are
    skipped.  When nothing is found, a single empty string is returned so
    callers always receive a non-empty list.

    Note: the historical quota check (len > MAX) admits MAX+1 = 11 entries;
    preserved for backward compatibility.
    """
    txt_segs = re.split(r'src', s)
    txt_segs.pop(0)  # text before the first 'src' cannot hold a value
    linx = []
    MAX = 10
    for linkBlock in txt_segs:
        if len(linx) > MAX:
            break  # quota reached; stop scanning (was 'continue')
        matchResult = re.search(r'\s*=\s*\"([^\"]+)\"', linkBlock)
        if not matchResult:
            continue
        res = matchResult.group(1)
        if res in linx:
            continue  # skip duplicates
        if res.endswith(('gif', 'jpg', 'bmp', 'png')):
            linx.append(res)
    if not linx:
        linx.append('')  # placeholder so html2imgs can emit '#'
    return linx


def html2imgs(url):
    """Fetch *url* and return a list of absolute image URLs found in it.

    Returns [] on any fetch failure or empty body.  Relative image paths
    are resolved against the page URL; '#' stands in for "no image found".
    At most 16 entries are returned, and paths longer than 500 chars are
    skipped.
    """
    #logging.debug('IMG: ' + url)
    result = []
    s = ''
    try:
        urlresult = urlfetch.fetch(url)
        if urlresult.status_code == 200:
            s = urlresult.content
    # NOTE(review): apiproxy_errors is never imported in this module, so
    # evaluating this clause raises NameError, which the bare 'except'
    # below cannot catch (clause matching aborts).  Confirm and add
    # 'from google.appengine.runtime import apiproxy_errors'.
    except apiproxy_errors.DeadlineExceededError, e:
        return result #self.failure(e.message)
    except urlfetch.InvalidURLError, e:
        return result
    except:
        return result
    if len(s) == 0:
        return result
    imgPaths=getInlineImg(s)
    #logging.debug('IMGS: ' + str(len(imgPaths)))
    #logging.debug('image number: ' + str(len(imgPaths)))
    # Base of the page (everything up to the last '/') for relative paths.
    baseurl = url[0:url.rfind('/')]
    # NOTE(review): rfind(sub, start) searches FORWARD from that offset, so
    # this appears to always equal baseurl rather than the parent directory
    # presumably intended for '..' paths — verify with real inputs.
    base2url = url[0:url.rfind('/', len(baseurl))]
    for aPath in imgPaths:
        if len(result) > 15:
            break
        if len(aPath) > 500:
            continue
        try:
            #logging.debug(aPath)
            if aPath.startswith('http://') is True:
                result.append(aPath)  # already absolute
            elif aPath.startswith('..'):
                result.append(base2url + aPath[2:])
            elif aPath.startswith('//'):
                result.append('http:' + aPath)  # protocol-relative
            elif aPath == '':
                result.append('#')  # didn't find img, give a '#'
            else:
                # Site-relative or bare path: anchor it to the page base.
                if aPath.startswith('/') is not True:
                    aPath = '/' + aPath    
                result.append(baseurl + aPath)
        except UnicodeDecodeError, e:
                continue
        except:
            continue

    return result

def html2txt(url):
    """Fetch *url* and return its body converted to rough plain text via
    _html2txt.  Returns '' when the fetch fails."""
    # Get something to work with.
    #url = u"http://147.8.177.100:8080/"
    try:
        s = urlfetch.fetch(url).content
    except urlfetch.Error, e:
        return ''#self.failure(e.message)
    #return s
    return _html2txt(s)

