#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is not supported by Google
#
"""
Script to feed product data into a GSA using offer data already uploaded into
MerchantCenter.
 
To install:
apt-get update
apt-get install sqlite3 python-libxml2

./mc2gsa.py --username=<YOUR_MERCHANTCENTERLOGIN>@gmail.com 
            --password=<YOUR_MERCHANTCENTERPASSWORD>  
            --cid=<YOUR_MERCHANT_ID> 
            --gsa=http://<YOURGSA>:19900/xmlfeed


for feedergate 
  HTTP:  --gsa=http://<YOURGSA>:19900/xmlfeed
  HTTPS: --gsa=https://<YOURGSA>:19902/xmlfeed


or:          
  m = mc2gsa(username, password, cid, gsa)
  
  m.processAllProducts()   : 
                * connects to merchantcenter
                * notes the current_timestamp
                * downloads each product
                * uploads products to GSA as a feed
                * updates the sqlite database with each item and the current_timestamp
                * at the end of feed processing, if the stored last_sync of any
                  item is before current_timestamp, then
                       ** send a delete feed to the GSA for this item
                       ** delete this item from the sqlite database
                                          
  m.getSingleOffer(provided_id)  :  
                * Downloads a single item given its id key
                * uploads this item to GSA

  m.removeSingleOffer(provided_id)
                * looks up the item's URL from the sqlite3 database
                * submits a DELETE feed using provided_id's URL and then deletes the 
                  item from the local sqlite3 database
              
SQLite3 DB tools:
http://sqlitebrowser.sourceforge.net/
https://code.google.com/p/sqlite-manager/
            
"""
import datetime
import urllib,urlparse,urllib2
import sys
import time
import libxml2
import rfc822
from xml.sax.saxutils import escape
from xml.sax.saxutils import unescape
from xml.sax.saxutils import quoteattr
import re
import getopt
import xml.dom.minidom
import zlib
import base64
import string
import sqlite3
import mimetypes
import httplib, ssl, socket
from dateutil.parser import parse
from dateutil import tz

class mc2gsa(object):
  """Mirrors Merchant Center product offers into a GSA via XML feeds.

  Downloads offers through the Content API (v1), writes them out as
  incremental GSA feeds, and tracks every item in a local sqlite3
  database (itemstatus_<cid>.sqlite) so that items no longer present
  upstream can be removed with delete feeds.
  """

  def __init__(self, username, password, cid, gsa):
    """Authenticates via ClientLogin and opens the local item database.

    Args:
      username: Merchant Center account email.
      password: Merchant Center account password.
      cid: Merchant Center id; also used as the GSA feed datasource.
      gsa: feedergate URL, e.g. http://<gsa>:19900/xmlfeed.

    Exits the process if no auth token can be acquired.
    """
    self.username = username
    self.password = password
    self.gsa = gsa
    self.cid = cid
    self.conn = None
    self.cursor = None
    self.token = None

    # Feed envelope shared by add and delete feeds; cid is the datasource.
    self.header = ('<?xml version="1.0" encoding="UTF-8"?>'
                   '<!DOCTYPE gsafeed PUBLIC "-//Google//DTD GSA Feeds//EN" "">'
                   '<gsafeed>'
                   '<header>'
                   '<datasource>%s</datasource>'
                   '<feedtype>incremental</feedtype>'
                   '</header>'
                   '<group>') % self.cid
    self.footer = ('</group></gsafeed>')
    self.token = self.__getAuthToken(username, password)
    if self.token is None:
      self.log("Unable to acquire Auth token, exiting")
      sys.exit()
    self.__initializeDB(cid)

  def __initializeDB(self, cid):
    """Opens (or creates) the per-merchant sqlite3 item-status database."""
    self.log("Initializing DB: itemstatus_" + cid + ".sqlite")
    try:
      self.conn = sqlite3.connect('itemstatus_' + cid + '.sqlite', check_same_thread=False)
      self.cursor = self.conn.cursor()
    # Fixed: original caught "sqlite3.connect" (a function, not an
    # exception class), which could never match.
    except sqlite3.Error:
      self.log("ERROR Connecting to DB")

    try:
      self.cursor.execute('CREATE TABLE mc_item (id text primary key unique, cid text, link text, canonical_id text, last_sync DATETIME, expiration_date DATETIME)')
      self.cursor.execute('create index idx_id ON mc_item(id);')
      self.cursor.execute('create index idx_last_sync ON mc_item(last_sync);')
    except sqlite3.OperationalError as e:
      # The table already existing is the normal steady state on the
      # second and later runs; only log anything else.
      if (str(e) != 'table mc_item already exists'):
        self.log(str(e))

  def closeDB(self):
    """Closes the sqlite3 connection if one was opened."""
    if self.conn is not None:
      self.log("Closing DB connection")
      self.conn.close()

  def processAllProducts(self):
    """Full sync: feed every offer, then delete anything not re-seen."""
    self.log("Start Processing")
    self.now = time.mktime(datetime.datetime.now().timetuple())
    # Fixed: used the module-level global "cid", which only exists when
    # run as a script; now uses the instance's cid.
    baseURL = ("https://content.googleapis.com/content/v1/" + self.cid + "/items/products/generic?max-results=200")
    self.__getOffers(self.token, self.cid, baseURL)
    self.__removeOffersBefore(self.now)
    self.log("End Processing")

  def getSingleOffer(self, pid):
    """Downloads a single offer by its sc:id key and feeds it to the GSA."""
    self.log("Recalling " + pid)
    self.now = time.mktime(datetime.datetime.now().timetuple())
    # Fixed: used the module-level global "cid"; now uses the instance's cid.
    baseURL = ("https://content.googleapis.com/content/v1/" + self.cid + "/items/products/schema/" + pid)
    self.__getOffers(self.token, self.cid, baseURL)
    self.log("End Processing")

  def __getOffers(self, token, cid, baseURL):
    """Pages through the Content API feed at baseURL, emitting GSA feeds.

    Follows rel="next" links until exhausted.  After each page, if the
    feed is complete or 20000 items have accumulated, the records are
    written to <cid>_<n>.xml, pushed to the GSA, and the sqlite
    bookkeeping is committed.
    """
    has_next_link = True
    total_items_processed = 0
    feed_counter = 0
    items = ''
    while has_next_link:
      has_next_link = False
      urllib2.install_opener(urllib2.build_opener(HTTPSHandlerV3()))
      req = urllib2.Request(baseURL)
      req.add_header("Authorization", "GoogleLogin auth=" + token)

      try:
        response = urllib2.urlopen(req)
      except urllib2.HTTPError as e:
        self.log('The server couldnt fulfill the request.')
        self.log('Error code: ' + str(e.code))
        sys.exit(1)
      except urllib2.URLError as e:
        self.log('We failed to reach a server.')
        self.log('Reason: ' + str(e.reason))
        sys.exit(1)

      content = response.read()
      doc = libxml2.parseDoc(content)
      ctxt = doc.xpathNewContext()
      ctxt.xpathRegisterNs('openSearch', "http://a9.com/-/spec/opensearch/1.1/")
      ctxt.xpathRegisterNs('s', "http://www.google.com/shopping/api/schemas/2010")
      ctxt.xpathRegisterNs('atom', "http://www.w3.org/2005/Atom")
      ctxt.xpathRegisterNs('app', "http://www.w3.org/2007/app")
      ctxt.xpathRegisterNs('gd', "http://schemas.google.com/g/2005")
      ctxt.xpathRegisterNs('sc', "http://schemas.google.com/structuredcontent/2009")
      ctxt.xpathRegisterNs('scp', "http://schemas.google.com/structuredcontent/2009/products")

      # Follow pagination: a rel="next" feed-level link means more pages.
      nl_links = ctxt.xpathEval('/atom:feed/atom:link')
      for n_link in nl_links:
        rel = n_link.xpathEval('@rel')[0].content
        if rel == "next":
          has_next_link = True
          baseURL = n_link.xpathEval('@href')[0].content

      nl_offers = ctxt.xpathEval('/atom:feed/atom:entry')
      if len(nl_offers) == 0:
        # A single-offer fetch returns a bare <entry> rather than a feed.
        nl_offers = ctxt.xpathEval('/atom:entry')

      for n_offer in nl_offers:
        ctxt.setContextNode(n_offer)
        image_link = None
        original_link = None
        link = None
        title = ctxt.xpathEval('atom:title')[0].content
        provided_id = ctxt.xpathEval('sc:id')[0].content
        enlisted_usecases = []
        required_usecases = ['CommerceSearch', 'ProductSearch']
        nl_usecases = ctxt.xpathEval("app:control/sc:status[@status='enlisted']")
        for n_usecase in nl_usecases:
          enlisted_usecases.append(n_usecase.prop('dest'))
        # Only offers enlisted for at least one required usecase are fed.
        skip_offer = True
        for c in required_usecases:
          if c in enlisted_usecases:
            skip_offer = False
        if skip_offer:
          self.log("Not enlisted for required usecase(s). Skipping product " + provided_id)
        if (len(ctxt.xpathEval('atom:content')) > 0):
          description = ctxt.xpathEval('atom:content')[0].content
        else:
          description = ''
        canonical_id = urllib2.unquote(ctxt.xpathEval('atom:id')[0].content)

        expiration_date = ctxt.xpathEval('sc:expiration_date')[0].content
        if len(ctxt.xpathEval('sc:image_link')) > 0:
          image_link = escape(ctxt.xpathEval('sc:image_link')[0].content)

        t = time.mktime(datetime.datetime.strptime(ctxt.xpathEval('atom:updated')[0].content, "%Y-%m-%dT%H:%M:%S.%fZ").timetuple())
        # Equivalent to the original parsedate_tz/mktime_tz round trip:
        # parsedate_tz of a tz-less local-time string yields tz=None, and
        # mktime_tz then assumes local time, giving back t unchanged.
        last_modified = rfc822.formatdate(t)
        nl_links = ctxt.xpathEval('atom:link')
        for n_link in nl_links:
          if n_link.prop('rel') == 'alternate':
            # Append the providedID to the url to make unique URLs.
            original_link = n_link.prop('href')
            params = {'pid': provided_id}
            link = escape(self.addGetParameters(original_link, params))
        if link is None:
          # Fixed: original raised NameError when an entry carried no
          # rel="alternate" link; skip the offer instead of crashing.
          self.log("No alternate link found. Skipping product " + provided_id)
          total_items_processed = total_items_processed + 1
          continue
        meta = '<metadata>'
        nl_attributes = ctxt.xpathEval('sc:attribute')
        for n_attribute in nl_attributes:
          ctxt.setContextNode(n_attribute)
          v = self.__remove_control_chars_and_escape(ctxt.xpathEval('.')[0].content)
          if v != '':
            meta = meta + '<meta name="' + n_attribute.prop('name').replace(' ', '_') + '" content=' + v + '/>'
            if (n_attribute.prop('name').replace(' ', '_') == 'product_type'):
              # Expand "A > B > C" into per-level metas (product_type_N
              # and cumulative product_type_level_N) for GSA faceting.
              y = 1
              curr = ''
              c = unescape(v).split('>')
              for jj in c:
                if curr != '':
                  curr = curr + ' > ' + jj.replace('"', '').strip()
                else:
                  curr = jj.replace('"', '').strip()
                meta = meta + '<meta name="product_type_' + str(y) + '" content=' + self.__remove_control_chars_and_escape(jj.replace('"', '').strip()) + '/>'
                meta = meta + '<meta name="product_type_level_' + str(y) + '" content=' + self.__remove_control_chars_and_escape(curr) + '/>'
                y = y + 1
        meta = meta + '<meta name="title" content=' + self.__remove_control_chars_and_escape(title) + '/>'
        meta = meta + '<meta name="id" content="' + escape(provided_id) + '"/>'
        if image_link is not None:
          meta = meta + '<meta name="image_link" content="' + escape(image_link) + '"/>'
        if description != '':
          meta = meta + '<meta name="description" content=' + self.__remove_control_chars_and_escape(description) + '/>'
        meta = meta + '<meta name="expiration_date" content="' + escape(expiration_date) + '"/>'
        meta = meta + '</metadata>'
        pr = self.__getPageRank(original_link, provided_id)
        if not skip_offer:
          items = items + '<record pagerank="' + str(pr) + '" url="' + link + '" displayurl=' + self.__remove_control_chars_and_escape(original_link) + ' last-modified="' + last_modified + '" mimetype="text/html" action="add" lock="true">'
          cc = '<html><head><title>%s</title></head><body>%s<body></html>' % (title, description)
          items = items + meta + '<content><![CDATA[' + cc + ']]></content>'
          items = items + '</record>'
          u_expiration_time = time.mktime(parse(expiration_date).timetuple())
          self.__updateItemStatus(provided_id, cid, original_link, canonical_id, self.now, u_expiration_time)
        total_items_processed = total_items_processed + 1
      if not has_next_link or total_items_processed % 20000 == 0:
        self.conn.commit()
        xmlfilename = self.cid + '_' + str(feed_counter) + '.xml'
        s = self.header + items + self.footer
        items = ''
        # Fixed: context manager guarantees the handle is closed even if
        # the write fails.
        with open(xmlfilename, 'w') as f:
          f.write(s)
        self.log("Feed written to : " + xmlfilename)

        self.__pushfeed(self.gsa, xmlfilename, feed_counter)
        feed_counter = feed_counter + 1

      ctxt.xpathFreeContext()
      doc.freeDoc()
      self.log("Number of items processed: " + str(total_items_processed))

  def __updateItemStatus(self, provided_id, cid, link, canonical_id, sync_time, expiration_time):
    """Upserts the bookkeeping row for one item.

    Fixed: parameterized SQL instead of string interpolation, which both
    broke on values containing double quotes and was injectable.
    """
    try:
      self.cursor.execute(
          'INSERT OR REPLACE INTO mc_item (id, cid, link, canonical_id, last_sync, expiration_date) VALUES (?, ?, ?, ?, ?, ?)',
          (provided_id, cid, link, canonical_id, sync_time, expiration_time))
    except sqlite3.OperationalError as e:
      self.log(str(e))

  def removeSingleOffer(self, pid):
    """Sends a delete feed for pid's URL and purges it from the local DB."""
    self.log("Deleting item: " + pid)
    try:
      link = ''
      # Fixed: parameterized query instead of string interpolation.
      for row in self.cursor.execute('select id,link,canonical_id from mc_item where id=?', (pid,)):
        link = row[1]
      if link == '':
        self.log('Entry not found in DB, not submitting delete feed')
        return
      item_to_delete = '<record url="%s"  mimetype="text/html" action="delete"/>\n' % link
      s = self.header + item_to_delete + self.footer
      xmlfilename = self.cid + '_delete' + '.xml'
      with open(xmlfilename, 'w') as f:
        f.write(s)
      self.log("Delete feed written to : " + xmlfilename)
      resp = self.__pushfeed(self.gsa, xmlfilename, 0)

      if resp:
        # Only drop the row once the GSA acknowledged the delete feed.
        self.log("Delete Feed successful, commit changes to db")
        self.cursor.execute('delete from mc_item where id=?', (pid,))
        self.conn.commit()
    except sqlite3.OperationalError as e:
      self.log(str(e))

  def __removeOffersBefore(self, now):
    """Delete-feeds every item whose last_sync predates this run's start."""
    entry = ''
    try:
      # Fixed: parameterized query instead of string concatenation.
      for row in self.cursor.execute('select id,link from mc_item where last_sync < ?', (now,)):
        item_to_delete = '<record url="%s"  mimetype="text/html" action="delete"/>\n' % row[1]
        entry = entry + item_to_delete
        self.log('Deleting itemID: ' + row[0])
      if (entry != ''):
        s = self.header + entry + self.footer
        xmlfilename = self.cid + '_delete' + '.xml'
        with open(xmlfilename, 'w') as f:
          f.write(s)
        self.log("Delete feed written to : " + xmlfilename)
        resp = self.__pushfeed(self.gsa, xmlfilename, 0)
        if resp:
          # Only purge rows once the GSA acknowledged the delete feed.
          self.log("Delete Feed successful, commit changes to db")
          self.cursor.execute('delete from mc_item where last_sync < ?', (now,))
          self.conn.commit()
    except sqlite3.OperationalError as e:
      self.log(str(e))

  def __getPageRank(self, link, provided_id):
    # Placeholder: every record currently gets a fixed pagerank of 98.
    return 98

  # http://stackoverflow.com/questions/2506379/add-params-to-given-url-in-python
  def addGetParameters(self, url, newParams):
    """Returns url with the newParams dict merged into its query string."""
    (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
    queryList = urlparse.parse_qsl(query, keep_blank_values=True)
    for key in newParams:
      queryList.append((key, newParams[key]))
    return urlparse.urlunparse((scheme, netloc, path, params, urllib.urlencode(queryList), fragment))

  def __pushfeed(self, url, xmlfilename, feed_sequence):
    """POSTs the feed file to the GSA; returns True only on 'Success'."""
    self.log('Connecting to ' + url + ' to transmit feed: ' + xmlfilename)
    params = []
    params.append(("feedtype", 'incremental'))
    params.append(("datasource", 'feed_' + self.cid + '_' + str(feed_sequence)))
    # Fixed: close the feed file instead of leaking the handle.
    with open(xmlfilename, 'r') as feed_file:
      data = ('data', xmlfilename, feed_file.read())
    request_url = self.__post_multipart(url, params, (data,))
    response = ''
    try:
      urllib2.install_opener(urllib2.build_opener(HTTPSHandlerV3()))
      response = urllib2.urlopen(request_url).read()
      self.log("Submitting feed to GSA: " + xmlfilename + " --> " + response)
    except urllib2.HTTPError as e:
      self.log('Unable to Push Feed to GSA.')
      self.log('Error code: ' + str(e))
      return False
    except urllib2.URLError as e:
      self.log('Failed to connect to GSA.')
      self.log('Reason: ' + str(e.reason))
      return False
    if response == 'Success':
      return True
    return False

  def __post_multipart(self, theurl, fields, files):
    """Builds a multipart/form-data POST Request for fields and files."""
    content_type, body = self.__encode_multipart_formdata(fields, files)
    headers = {}
    headers['Content-type'] = content_type
    headers['Content-length'] = str(len(body))
    urllib2.install_opener(urllib2.build_opener(HTTPSHandlerV3()))
    return urllib2.Request(theurl, body, headers)

  def __encode_multipart_formdata(self, fields, files):
    """Encodes (key, value) fields and (key, filename, value) files.

    Returns:
      (content_type, body) ready for an HTTP POST.
    """
    BOUNDARY = '----------boundary_of_feed_data$'
    CRLF = '\r\n'
    L = []
    for (key, value) in fields:
      L.append('--' + BOUNDARY)
      L.append('Content-Disposition: form-data; name="%s"' % key)
      L.append('')
      L.append(value)
    for (key, filename, value) in files:
      L.append('--' + BOUNDARY)
      L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
      L.append('Content-Type: %s' % self.__get_content_type(filename))
      L.append('')
      L.append(value)
    L.append('--' + BOUNDARY + '--')
    L.append('')
    body = CRLF.join(L)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body

  def __get_content_type(self, filename):
    """Guesses a MIME type from the filename, defaulting to octet-stream."""
    return mimetypes.guess_type(filename)[0] or 'application/octet-stream'

  def __remove_control_chars_and_escape(self, s):
    # NOTE(review): despite the name, this only XML-attribute-quotes the
    # value (quoteattr); control characters are not actually stripped.
    return quoteattr(s)

  def log(self, msg):
    """Prints a timestamped message and flushes stdout immediately."""
    print('[%s] %s' % (datetime.datetime.now(), msg))
    sys.stdout.flush()

  def __getAuthToken(self, email, passwd):
    """Obtains a ClientLogin token for the structuredcontent service.

    Exits the process on any HTTP/network failure or missing token.
    """
    authURL = "https://www.google.com/accounts/ClientLogin"
    params = urllib.urlencode({'Email': email, 'Passwd': passwd,
                               'service': 'structuredcontent', 'source': 'Test'})
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    urllib2.install_opener(urllib2.build_opener(HTTPSHandlerV3()))
    req = urllib2.Request(authURL, params, headers)
    try:
      response = urllib2.urlopen(req)
    except urllib2.HTTPError as e:
      self.log('Unable to acquire AuthToken.')
      self.log('Error code: ' + str(e))
      sys.exit(1)
    except urllib2.URLError as e:
      self.log('Failed to connect to auth server.')
      self.log('Reason: ' + str(e.reason))
      sys.exit(1)
    lresponse = response.read()
    # Fixed: split on the first '=' only -- token values may themselves
    # contain '=' padding characters.
    authResponseDict = dict(x.split("=", 1) for x in lresponse.split("\n") if x)
    # Fixed: .get() -- a response without an Auth line used to raise
    # KeyError instead of reaching the intended error message.
    authToken = authResponseDict.get("Auth")
    if authToken is None:
      self.log("Unable to acquire token")
      sys.exit(1)
    self.log("Acquired Token:" + authToken)
    return authToken
    
class HTTPSConnectionV3(httplib.HTTPSConnection):
    """HTTPSConnection that wraps its socket with an explicit SSL version.

    Tries TLSv1 first and falls back to SSLv23 negotiation if the
    handshake fails.  (The redundant pass-through __init__ override of
    the original was removed; the inherited one is identical.)
    """

    def connect(self):
        sock = socket.create_connection((self.host, self.port), self.timeout)
        if self._tunnel_host:
            self.sock = sock
            self._tunnel()
        # Set the public/private keys to use client certificate
        # authentication with the GSA's feedergate.
        # feedergate HTTPS port is --gsa=https://<YOURGSA>:19902/xmlfeed
        # Fixed: hoisted out of the try block so the SSLv23 fallback path
        # cannot hit an unbound name.
        cert_file = None  # 'clientcert_crt.pem'
        key_file = None   # 'clientcert_key.pem'
        try:
            self.sock = ssl.wrap_socket(sock, key_file, cert_file, ssl_version=ssl.PROTOCOL_TLSv1)
        except ssl.SSLError:
            print("Trying SSLv23.")
            self.sock = ssl.wrap_socket(sock, key_file, cert_file, ssl_version=ssl.PROTOCOL_SSLv23)
            
class HTTPSHandlerV3(urllib2.HTTPSHandler):
  """urllib2 HTTPS handler that opens connections via HTTPSConnectionV3,
  giving every request the explicit TLSv1/SSLv23 socket handling."""
  def https_open(self, req):
    return self.do_open(HTTPSConnectionV3, req)    
    
    
if __name__ == '__main__':

  # NOTE: these stay module-level globals (not wrapped in a main())
  # because they are read elsewhere in this file.
  username = None
  password = None
  cid = None
  gsa = None

  try:
    # Fixed: shortopts must be a string -- passing None made getopt
    # crash with AttributeError on any short option like "-h".
    opts, args = getopt.getopt(sys.argv[1:], '', ["username=", "password=", "cid=", "gsa="])
  except getopt.GetoptError:
    print('Please specify --username= --password= --cid= --gsa=')
    sys.exit(1)

  for opt, arg in opts:
    if opt == "--username":
      username = arg
    elif opt == "--password":
      password = arg
    elif opt == "--cid":
      cid = arg
    elif opt == '--gsa':
      gsa = arg

  # Fixed: fail fast with a clear usage message instead of crashing
  # later inside mc2gsa with an opaque error on a None argument.
  if username is None or password is None or cid is None or gsa is None:
    print('Please specify --username= --password= --cid= --gsa=')
    sys.exit(1)

  m = mc2gsa(username, password, cid, gsa)
  m.processAllProducts()

  #m.getSingleOffer("12345")
  #m.removeSingleOffer("56789")