#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is not supported by Google
#
"""
Sample script to convert a Google Merchant Center products feed file
into the XML format that the Google Search Appliance can process.

usage: 
    python feed2gsa.py 
       --infile=<filename> 
       --filetype=tsv|rss|atom|scapi 
       --operation=add|delete
    
"""

from xml.sax.saxutils import escape
from xml.sax.saxutils import quoteattr
from xml.sax.saxutils import unescape
import xml.dom.minidom
import getopt
import sys
import csv
import time
from datetime import datetime
import libxml2
from libxml2 import xmlNode
import zlib
import base64
import urllib,urlparse,urllib2


class feed2gsa(object):
  """Converts a Google Merchant Center product feed into GSA XML feed files.

  All of the work happens in __init__: the input feed is parsed according
  to file_type ('tsv', 'rss', 'atom' or 'scapi') and one or more files
  named gsa_<operation>_<rownumber>.xml are written to the current
  working directory.
  """

  def __init__(self, in_file, file_type, operation):
    """Parses in_file and writes the converted GSA feed file(s).

    Args:
      in_file: path to the source feed file.
      file_type: 'tsv', 'rss', 'atom' or 'scapi'; any other value falls
        through every branch and produces no output.
      operation: GSA record action ('add' or 'delete'); embedded in each
        <record> element and in the output file names.
    """
    self.in_file_name = in_file
    self.operation = operation

    # Count of rows/entries processed; doubles as the numeric suffix of the
    # output file name whenever a batch is flushed to disk.
    rownumber = 0

    # GSA feed boilerplate wrapped around every batch of <record> elements.
    # The datasource name is hard-coded to 'gcs2gsa.py'.
    header= ('<?xml version="1.0" encoding="UTF-8"?>\n'
                '<!DOCTYPE gsafeed PUBLIC "-//Google//DTD GSA Feeds//EN" "">\n'
                '<gsafeed>\n'
                '<header>\n'
                '<datasource>%s</datasource>\n'
                '<feedtype>incremental</feedtype>\n'
                '</header>\n'
                '<group>\n') % 'gcs2gsa.py'
    footer = '</group>\n</gsafeed>'

    self.log('Reading from ' + self.in_file_name)

    # Whitelist of attributes to forward to the GSA.  Currently unused: the
    # checks that reference it are commented out below, so every attribute
    # from the source feed is forwarded.
    valid_attributes = ['title','description','id','link','price', 'condition',
                        'gtin','image link','google product category']    
    
    if (file_type=='tsv'):
      # Tab-separated feed: the first row is the column header, every other
      # row is one product offer.
      csreader = csv.reader(open(self.in_file_name, 'rb'), delimiter='\t', quotechar=None)
      
      attrib_name = []   # column names taken from the header row
      now = time.time()  # NOTE(review): assigned but never used
      num_columns = 0 
      
      offers = ''        # accumulated <record> XML for the current batch
      for row in csreader:
        entry = ''
        meta = ''             
        rownumber = rownumber + 1

        if rownumber == 1:
          # Header row: remember the column names, stripping the Google
          # Base 'c:'/'g:' namespace prefixes.
          for i, v in enumerate(row):
            if v != '':
              if v.startswith('c:') or v.startswith('g:'):
                attrib_name.append(v.split(':')[1])
              else:
                attrib_name.append(v)
          num_columns = len(attrib_name)   
        else:       
          if (len(row)!=num_columns):
            # Malformed row: column count differs from the header; log and
            # skip the item entirely.
            self.log("Too many data columns defined: %d(header) != %d(this)" % (num_columns, len(row)))
            for i, v in enumerate(row):
              if i < num_columns:
                s = attrib_name[i]
              else:
                s = "N/A"
              #self.log( "[%d:%s] %s" % (i+1, s, v))
            self.log("This item is skipped.")
          else:
            title = ''
            description = ''
            pid=''
            # NOTE(review): 'h' (the opening <record> tag) is only assigned
            # inside the 'link' branch below; a feed without a 'link' column
            # would raise NameError when 'entry' is assembled.
            for i, v in enumerate(row):          
              if (attrib_name[i] == 'id'):
                pid = v          
              if (attrib_name[i] == 'link'):
                pr = self.__getPageRank(v)
                original_link =v
                if (pid == ''):
                  # The id column must precede the link column so the id can
                  # be appended to the URL as a 'pid' query parameter.
                  # NOTE(review): this branch assigns a full <record> tag to
                  # 'link', which is then quoted again into 'h' below —
                  # looks like a bug; confirm intended output.
                  self.log("ID column defined after 'link'.  Please rearrange columns in the source feed file and make the ID column first")
                  link = '<record pagerank="'+ str(pr) + '" url=' + self.remove_control_chars_and_escape(original_link) + ' action="' + self.operation + '" mimetype="text/html" lock="true">\n'                        
                else:
                  params = {'pid':pid}
                  link = self.addGetParameters(original_link, params)          
                h = '<record pagerank="'+ str(pr) + '" url=' + self.remove_control_chars_and_escape(link) + ' action="' + self.operation + '" mimetype="text/html" lock="true">\n'      
              elif (attrib_name[i] == 'description'):
                description = v
                
              elif (attrib_name[i] == 'product_type'):
                # Explode the '>'-separated category path into numbered
                # product_type_N / product_type_level_N meta tags, where
                # level_N is the cumulative path down to segment N.
                y = 1
                curr = ''
                c = v.split('>')
                for jj in c:
                  if curr != '':
                    curr = curr + ' > ' + jj.replace('"','').strip()
                  else:
                    curr = jj.replace('"','').strip()
                  meta = meta +  '<meta name="product_type_' + str(y) + '" content='  + self.remove_control_chars_and_escape(jj.replace('"','').strip()) + '/>\n'
                  meta = meta +  '<meta name="product_type_level_' + str(y) + '" content='  + self.remove_control_chars_and_escape(curr) + '/>\n'
                  y = y+1                
                  
              elif (attrib_name[i] == 'title'):
                title = v 
              # uncomment if you only want certain attributes from the source feed to get pushed to the GSA
              #if (attrib_name[i] in valid_attributes and len(v)>0):
              if (len(v)>0):                
                meta = meta + '<meta name=' + self.remove_control_chars_and_escape(attrib_name[i].replace(' ', '_')) + ' content=' + self.remove_control_chars_and_escape(v) + '/>\n'
            # Minimal HTML body for the GSA to index, embedded as CDATA.
            cc = '<html><head><title>%s</title></head><body>%s<body></html>'%(title,description)
            #cc = base64.b64encode(zlib.compress(cc))          
            content = '<content><![CDATA[' + cc + ']]></content>\n'                  
            entry = h + '<metadata>' + meta + '</metadata>\n' + content + '</record>\n'
            offers = offers + entry
            if (rownumber%100000==0):
              # Flush a batch every 100,000 rows to bound memory use.
              out_file = open(str('gsa_' + self.operation + '_' + str(rownumber) +'.xml'), 'w') 
              b = header +  offers + footer
              out_file.write(b)
              out_file.close()
              offers = ''
              self.log("writing to: " + str('gsa_' + self.operation + '_' + str(rownumber) +'.xml') )   
      if (offers != ''):
        # Write out whatever remains after the last full batch.
        out_file = open(str('gsa_' + self.operation + '_' + str(rownumber) +'.xml'), 'w')
        b = header +  offers + footer
        out_file.write(b)
        out_file.close()
        offers = ''
        self.log("final writing to: " + str('gsa_' + self.operation + '_' + str(rownumber) +'.xml') )                 
    elif  (file_type=='atom'):
      # Atom feed: parse with libxml2 and walk /atom:feed/atom:entry,
      # reading the Google Base (g:/c:) extension elements.
      try:   
        source = open(in_file)
        doc = libxml2.parseDoc(source.read())
        ctxt = doc.xpathNewContext()
        offers = ''
        ctxt.xpathRegisterNs('g', "http://base.google.com/ns/1.0")
        ctxt.xpathRegisterNs('c', "http://base.google.com/cns/1.0")  
        ctxt.xpathRegisterNs('atom', "http://www.w3.org/2005/Atom")  
        nl_entries = ctxt.xpathEval('/atom:feed/atom:entry')   
        for n_entry in nl_entries:
          rownumber = rownumber + 1
          ctxt.setContextNode(n_entry)          
          # NOTE(review): 'id' shadows the builtin of the same name.
          id = urllib2.unquote(ctxt.xpathEval('g:id')[0].content)           
          title = escape(ctxt.xpathEval('atom:title')[0].content)        
          link = ctxt.xpathEval('g:link')[0].content
          # Append the product id to the landing-page URL as ?pid=...
          params = {'pid':id}
          link = escape(self.addGetParameters(link, params))          
          price = ctxt.xpathEval('g:price')[0].content
          # Normalize the price to two decimal places.
          price = str(("%.2f" % round(float(price),2)))
          description = escape(ctxt.xpathEval('g:description')[0].content)
          
          pr = self.__getPageRank(link)          
          h = '<record pagerank="'+ str(pr) + '" url=' + self.remove_control_chars_and_escape(link) +' action="' + self.operation + '" mimetype="text/html" lock="true">\n'  
          meta = ''
          # Turn every child element (except price/shipping) into a <meta>
          # tag; product_type additionally gets the numbered level tags.
          for n in n_entry.children:
            if n.type == "element":
              # uncomment if you only want certain attributes from the source feed to get pushed to the GSA
              #if (n.name not in ['price'] and n.name in valid_attributes):
              if (n.name not in ['price', 'shipping']):                
                if (len(self.remove_control_chars_and_escape(n.content))>0):
                  meta = meta +  '<meta name="' + n.name + '" content=' + self.remove_control_chars_and_escape(n.content.replace(' ','_')) + '/>\n'
              if (n.name in 'product_type'):
                y = 1
                curr = ''
                c = unescape(n.content).split('>')
                for jj in c:
                  if curr != '':
                    curr = curr + ' > ' + jj.replace('"','').strip()
                  else:
                    curr = jj.replace('"','').strip()
                  meta = meta +  '<meta name="product_type_' + str(y) + '" content='  + self.remove_control_chars_and_escape(jj.replace('"','').strip()) + '/>'
                  meta = meta +  '<meta name="product_type_level_' + str(y) + '" content='  + self.remove_control_chars_and_escape(curr) + '/>'
                  y = y+1                 
                                    
          meta = meta + '<meta name="price" content="' + price + '"/>\n'
          cc = '<html><head><title>%s</title></head><body>%s<body></html>'%(title,description)
          content = '<content><![CDATA[' + cc  + ']]></content>\n'  
          entry = h + '<metadata>' + meta + '</metadata>\n' + content + '</record>\n' 
          offers = offers + entry
          if (rownumber%100000==0):
            # Flush a batch every 100,000 entries to bound memory use.
            out_file = open(str('gsa_' + self.operation + '_' + str(rownumber) +'.xml'), 'w') 
            b = header +  offers + footer
            out_file.write(b)
            out_file.close()
            offers = ''       
        ctxt.xpathFreeContext()
        doc.freeDoc()
        if len(offers) > 0:
          out_file = open(str('gsa_' + self.operation + '_' + str(rownumber) +'.xml'), 'w') 
          b = header +  offers + footer
          out_file.write(b)
          out_file.close()
          self.log("writing to: " + str('gsa_' + self.operation + '_' + str(rownumber) +'.xml') ) 
      except Exception, e:
        # Broad catch: any parse/IO error aborts the branch with a log line.
        self.log('error: ' + str(e)) 
    elif  (file_type=='rss'):
      # RSS 2.0 feed: same shape as the atom branch, but entries live under
      # /rss/channel/item and title/link/description are plain RSS elements.
      try:   
        source = open(in_file)
        doc = libxml2.parseDoc(source.read())
        ctxt = doc.xpathNewContext()
        offers = ''

        ctxt.xpathRegisterNs('g', "http://base.google.com/ns/1.0")
        ctxt.xpathRegisterNs('c', "http://base.google.com/cns/1.0")  

        nl_items = ctxt.xpathEval('/rss/channel/item')   
        for n_item in nl_items:
          rownumber = rownumber + 1
          ctxt.setContextNode(n_item)          
          # NOTE(review): 'id' shadows the builtin of the same name.
          id = urllib2.unquote(ctxt.xpathEval('g:id')[0].content)           
          title = escape(ctxt.xpathEval('title')[0].content)        
          link = ctxt.xpathEval('link')[0].content
          params = {'pid':id}
          link = escape(self.addGetParameters(link, params))          
          price = ctxt.xpathEval('g:price')[0].content
          #hack...rss price could be in the format <g:price>26.99 EUR</g:price>...i'm
          #just ignoring the units bit for now
          price = price.split(" ")[0]
          price = str(("%.2f" % round(float(price),2)))
          description = escape(ctxt.xpathEval('description')[0].content)
          pr = self.__getPageRank(link)            
          h = '<record pagerank="'+ str(pr) + '" url=' + self.remove_control_chars_and_escape(link) +' action="' + self.operation + '" mimetype="text/html" lock="true">\n'  
          meta = ''
                    
          # Turn every child element (except price/shipping) into a <meta>
          # tag; product_type additionally gets the numbered level tags.
          for n in n_item.children:
            if n.type == "element":
              #if (n.name not in ['price'] and n.name in valid_attributes):
              if (n.name not in ['price','shipping']):                
                if (len(self.remove_control_chars_and_escape(n.content))>0):
                  meta = meta +  '<meta name="' + n.name + '" content=' + self.remove_control_chars_and_escape(n.content.replace(' ','_')) + '/>\n'
              if (n.name in 'product_type'):
                y = 1
                curr = ''
                c = unescape(n.content).split('>')
                for jj in c:
                  if curr != '':
                    curr = curr + ' > ' + jj.replace('"','').strip()
                  else:
                    curr = jj.replace('"','').strip()
                  meta = meta +  '<meta name="product_type_' + str(y) + '" content='  + self.remove_control_chars_and_escape(jj.replace('"','').strip()) + '/>'
                  meta = meta +  '<meta name="product_type_level_' + str(y) + '" content='  + self.remove_control_chars_and_escape(curr) + '/>'
                  y = y+1                 
                  
          meta = meta + '<meta name="price" content="' + price + '"/>\n'
          cc = '<html><head><title>%s</title></head><body>%s<body></html>'%(title,description)
          content = '<content><![CDATA[' + cc  + ']]></content>\n'  
          entry = h + '<metadata>' + meta + '</metadata>\n' + content + '</record>\n' 
          offers = offers + entry                     
          
        # NOTE(review): unlike the tsv/atom branches, this branch has no
        # 100,000-row batch flush — everything is held in memory until the end.
        ctxt.xpathFreeContext()
        doc.freeDoc()
        if len(offers) > 0:
          out_file = open(str('gsa_' + self.operation + '_' + str(rownumber) +'.xml'), 'w') 
          b = header +  offers + footer
          out_file.write(b)
          out_file.close()
          self.log("writing to: " + str('gsa_' + self.operation + '_' + str(rownumber) +'.xml') ) 
      except Exception, e:
        # Broad catch: any parse/IO error aborts the branch with a log line.
        self.log('error: ' + str(e))         
    elif  (file_type=='scapi'):
      # Structured Content API batch feed: only insert/update entries are
      # converted; delete operations are logged and skipped.
      try:
        rownumber = 0
        source = open(in_file)
        doc = libxml2.parseDoc(source.read())
        ctxt = doc.xpathNewContext()
        offers = ''

        ctxt.xpathRegisterNs('atom', 'http://www.w3.org/2005/Atom')
        ctxt.xpathRegisterNs('sc',  'http://schemas.google.com/structuredcontent/2009') 
        ctxt.xpathRegisterNs('scp', 'http://schemas.google.com/structuredcontent/2009/products' )
        ctxt.xpathRegisterNs('app',  'http://www.w3.org/2007/app' )
        ctxt.xpathRegisterNs('batch', 'http://schemas.google.com/gdata/batch' )        
          
        nl_offers = ctxt.xpathEval('/atom:feed/atom:entry')
        for n_offer in nl_offers:
          rownumber = rownumber + 1
        
          ctxt.setContextNode(n_offer)          
          operation_type = ctxt.xpathEval('batch:operation')[0].prop('type')
          
          if (operation_type.lower() == 'insert' or operation_type.lower() == 'update'):
            entry = ''
            meta = ''              
            title = ''
            link = ''
            description = ''
                      
            provided_id = ctxt.xpathEval('sc:id')[0].content
            title = ctxt.xpathEval('atom:title')[0].content
            description = ctxt.xpathEval('atom:content')[0].content
            link = ctxt.xpathEval("atom:link[@rel='alternate']")[0].prop('href')
            # Append the product id to the landing-page URL as ?pid=...
            params = {'pid':provided_id}
            link = escape(self.addGetParameters(link, params)) 

            pr = self.__getPageRank(link)            
            # NOTE(review): action is hard-coded to "add" here, ignoring
            # self.operation — confirm that is intentional for SCAPI feeds.
            h = '<record pagerank="'+ str(pr) + '" url=' + self.remove_control_chars_and_escape(link) +' action="add" mimetype="text/html" lock="true">\n'  
            nl_other_attributes = ctxt.xpathEval("*")
            for n_other in nl_other_attributes:
              #if (n_other.name not in ['price'] and n_other.name in valid_attributes):
              if (n_other.name not in ['control','operation','group','tax',
                                       'shipping', 'shipping_weight']):
                if (self.remove_control_chars_and_escape(n_other.content) != '' and n_other.content != ''):
                  if (n_other.name == 'attribute'):
                    # Generic <sc:attribute name="..."> elements keep their
                    # own name attribute as the meta name.
                    meta = meta +  '<meta name="' + n_other.prop('name').replace(' ','_') + '" content=' + self.remove_control_chars_and_escape(n_other.content.replace(' ','_')) + '/>\n'                    
                  else:
                    meta = meta +  '<meta name="' + n_other.name.replace(' ','_') + '" content=' + self.remove_control_chars_and_escape(n_other.content.replace(' ','_')) + '/>\n'
                if (n_other.name in 'product_type'):
                  y = 1
                  curr = ''
                  c = unescape(n_other.content).split('>')
                  for jj in c:
                    if curr != '':
                      curr = curr + ' > ' + jj.replace('"','').strip()
                    else:
                      curr = jj.replace('"','').strip()
                    meta = meta +  '<meta name="product_type_' + str(y) + '" content='  + self.remove_control_chars_and_escape(jj.replace('"','').strip()) + '/>\n'
                    meta = meta +  '<meta name="product_type_level_' + str(y) + '" content='  + self.remove_control_chars_and_escape(curr) + '/>\n'
                    y = y+1                  
                    
            cc = '<html><head><title>%s</title></head><body>%s<body></html>'%(title,description)
            content = '<content><![CDATA[' + cc  + ']]></content>\n'  
            entry = h + '<metadata>\n' + meta + '</metadata>\n' + content + '</record>\n' 
            offers = offers + entry              
          else:
            provided_id =  ctxt.xpathEval('atom:id')[0].content
            self.log("Skipping Delete operation for SCAPI feed types " + provided_id)
            #SCAPI delete feeds don't need the <link/> attribute but if they happen to add it in...
            #link = ctxt.xpathEval("atom:link[@rel='alternate']")[0].prop('href')
            # offers = offers + '<record url=' + self.remove_control_chars_and_escape(link) +' action="delete" mimetype="text/html" lock="true">\n'
    
        ctxt.xpathFreeContext()
        doc.freeDoc()
        if len(offers) > 0:
          out_file = open(str('gsa_' + self.operation + '_' + str(rownumber) +'.xml'), 'w') 
          b = header +  offers + footer
          out_file.write(b)
          out_file.close()
          self.log("writing to: " + str('gsa_' + self.operation + '_' + str(rownumber) +'.xml') ) 
      except Exception, e:
        # Broad catch: any parse/IO error aborts the branch with a log line.
        self.log('error: ' + str(e))
        
    self.log("Done processing feedfile")
     
  def __getPageRank(self,link): 
    """Returns the PageRank value to embed for link; currently a fixed 98."""
    return 98
        
  def remove_control_chars_and_escape(self,s):
    """Quotes s for use as an XML attribute value (including the quotes).

    NOTE(review): despite the name, control characters are NOT removed —
    quoteattr() only escapes XML-special characters and wraps the result
    in quotation marks.
    """
    return quoteattr(s)
  
  def log(self,msg):
    """Prints msg to stdout prefixed with the current timestamp."""
    print ('[%s] %s') % (datetime.now(), msg)
    
  def __printFeedFile(self,batch_xml):
    """Parses batch_xml as a well-formedness check and pretty-prints it.

    Errors are logged only; processing is expected to continue regardless.
    """
    try:
      self.log('Attempting to parse batch XML file.')
      xmldoc = xml.dom.minidom.parseString(batch_xml)
      self.log('XML batch file well formed.')
      self.log(xmldoc.toprettyxml())
    except Exception, e:
      self.log('Error: ' + str(e))
      self.log('XML batch file not well formed...continue anyway' )    
    
  #http://stackoverflow.com/questions/2506379/add-params-to-given-url-in-python
  def addGetParameters(self,url, newParams):
      """Returns url with each key/value in newParams appended to its query string."""
      (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
      queryList = urlparse.parse_qsl(query, keep_blank_values=True)
      for key in newParams:
          queryList.append((key, newParams[key]))
      return urlparse.urlunparse((scheme, netloc, path, params, urllib.urlencode(queryList), fragment))      

    
if __name__ == '__main__':
  in_file = None
  file_type='csv'
  operation = 'add'
  try:
    opts, args = getopt.getopt(sys.argv[1:], None, ["infile=","filetype=", "operation="])
  except getopt.GetoptError:
    print 'Please specify --infile= --filetype=tsv|atom|rss|scapi --operation=add|delete'
    sys.exit(1)

  for opt, arg in opts:
    if opt == "--infile":
      in_file = arg
    if opt == "--operation":
      operation = arg
      if operation not in ['add', 'delete']:
        print '--operation must be add, delete' 
        sys.exit()
    if opt == "--filetype":
      file_type = arg
      if file_type not in ['tsv', 'atom', 'rss', 'scapi']:
        print '--file_type must be tsv, atom, rss or scapi' 
        sys.exit()

  feed2gsa(in_file, file_type, operation)