#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This code runs a DFP report and downloads it as a csv file for line items
targeting a specific key. It is a modification to the api_reporting.py script
by Guillaume Spillmann, which is a combination of the run_inventory_report.py
and download_report.py scripts by Jeff Sham.
In addition it deletes unnecessary columns introduced by the way the API
handles ad unit reporting, if AD_UNIT is in the selected dimensions.
Flat and Hierarchical ad unit views are currently (06-2012) a bit broken and
the inventory tree needs to be reconstructed. If you need those views with
a full inventory tree email gspillmann@google.com to get another script"""

__author__ = 'masri@google.com (Leo Masri)'

# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
import time
import csv

# Import appropriate classes from the client library.
from adspygoogle.dfp import DfpUtils
from adspygoogle.dfp.DfpClient import DfpClient


# Create the DFP client; its configuration is read from four directories up.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))

# Pull down every line item in the network through the LineItemService.
line_items = DfpUtils.GetAllEntitiesByStatementWithService(
    client.GetService('LineItemService', 'https://www.google.com', 'v201204'))

# Collect the ids of every line item whose custom targeting references the
# sought custom-targeting key. In Orange America's network its id is 79605.
# The targeting tree is: customTargeting -> CustomCriteriaSet children ->
# CustomCriteria grandchildren carrying the actual keyId.
sought_key = '79605'
as_line_items = []
for li in line_items:
  # Guard clauses: skip line items without custom targeting data.
  if 'targeting' not in li:
    continue
  if 'customTargeting' not in li['targeting']:
    continue
  custom_targeting = li['targeting']['customTargeting']
  if 'children' not in custom_targeting:
    continue
  targets_key = False
  for child in custom_targeting['children']:
    if child['CustomCriteriaNode_Type'] != 'CustomCriteriaSet':
      continue
    for grandchild in child['children']:
      if (grandchild['CustomCriteriaNode_Type'] == 'CustomCriteria'
          and grandchild['keyId'] == sought_key):
        targets_key = True
        break  # One match is enough; no need to scan further criteria.
    if targets_key:
      break
  if targets_key:
    as_line_items.append(li['id'])

# Initialize the ReportService used to run and poll the report job below.
# By default, the request is always made against the sandbox environment.
report_service = client.GetReportService(
    'https://www.google.com', 'v201204')


# Create report job.
# Edit 'reportQuery' to get the report you need. Documentation is at goo.gl/wyLnJ
report_job = {
    'reportQuery': {
        # One row per advertiser / order / line item / country / top-level
        # ad unit combination.
        'dimensions': ['ADVERTISER_NAME', 'ORDER', 'LINE_ITEM', 'COUNTRY_NAME',
                       'AD_UNIT_NAME'],
        # Metric columns, ad-server and Ad Exchange figures side by side.
        # NOTE: a stray truncated entry 'AD_SERVER_' was removed here — it is
        # not a valid column name and would make the report request fail.
        'columns': ['AD_SERVER_IMPRESSIONS', 'AD_EXCHANGE_IMPRESSIONS',
                    'AD_SERVER_CTR', 'AD_EXCHANGE_CTR',
                    'AD_SERVER_REVENUE', 'AD_EXCHANGE_REVENUE',
                    'AD_SERVER_AVERAGE_ECPM', 'AD_EXCHANGE_AVERAGE_ECPM',
                    'AD_SERVER_CLICKS', 'AD_EXCHANGE_CLICKS'],
        'dimensionAttributes': ['LINE_ITEM_START_DATE_TIME',
                                'LINE_ITEM_END_DATE_TIME',
                                'LINE_ITEM_COST_TYPE',
                                'LINE_ITEM_COST_PER_UNIT',
                                'LINE_ITEM_GOAL_QUANTITY'],
        # Fixed custom range: March 2012.
        'dateRangeType': 'CUSTOM_DATE',
        'adUnitView': 'TOP_LEVEL',
        'startDate': {'year': '2012', 'month': '3', 'day': '1'},
        'endDate': {'year': '2012', 'month': '3', 'day': '31'}
    }
}

# Create the filter statement so the report only covers the matched line items.
# The PQL statement binds one NumberValue per line item id:
#   WHERE LINE_ITEM_ID IN (:lineItemId0, :lineItemId1, ...)
values = []
if as_line_items:
  placeholders = []
  for i, as_line_item in enumerate(as_line_items):
    values.append({'key': 'lineItemId' + str(i),
                   'value': {'xsi_type': 'NumberValue',
                             'value': as_line_item}})
    placeholders.append(':lineItemId' + str(i))
  query = 'WHERE LINE_ITEM_ID IN (' + ', '.join(placeholders) + ')'

  report_job['reportQuery']['statement'] = {
      'query': query,
      'values': values
  }
else:
  # Nothing targets the key, so there is nothing worth reporting on.
  print('No line items found targeting the desired key.')
  sys.exit()

# Keep handy references to the query pieces for the CSV post-processing below
# (report_job itself is replaced by the server's response in a moment).
query = report_job['reportQuery']
columns = query['columns']
dimensions = query['dimensions']
# Number of 30-second polling intervals elapsed; used to estimate runtime.
timer = 1

# Run report.
report_job = report_service.RunReportJob(report_job)[0]

# Poll every 30 seconds until the job reaches a terminal state.
status = report_job['reportJobStatus']
while status not in ('COMPLETED', 'FAILED'):
  print('Report job with \'%s\' id is still running.' % report_job['id'])
  time.sleep(30)
  timer += 1
  status = report_service.GetReportJob(report_job['id'])[0]['reportJobStatus']

if status == 'FAILED':
  print('Report job with id \'%s\' failed to complete successfully.'
        % report_job['id'])
else:
  print('Report job with id \'%s\' completed successfully.' % report_job['id'])

  # Change to your preferred export format.
  export_format = 'CSV'

  # Download report data.
  data = DfpUtils.DownloadReport(report_job['id'], export_format,
                                 report_service)
  print('Report downloaded')

  # Persist the raw download before post-processing it. Using `with` ensures
  # the handles are closed (the original left the csv.reader's file open).
  raw_path = 'dfp_report_' + report_job['id'] + '.csv'
  final_path = 'dfp_report_' + report_job['id'] + '_final.csv'
  with open(raw_path, 'w') as raw_file:
    raw_file.write(data)

  # Remove unnecessary columns: when an AD_UNIT dimension is requested the
  # API appends extra columns, so keep only the leading ones.
  with open(raw_path, 'rb') as csv_file:
    reader = csv.reader(csv_file, delimiter=',', quotechar='"')
    headers = next(reader)
    ad_unit_dimensions = ['AD_UNIT', 'AD_UNIT_NAME', 'AD_UNIT_ID']
    if any(d in dimensions for d in ad_unit_dimensions):
      max_column_index = len(headers) - len(columns) + 2
    else:
      max_column_index = len(headers) + 1

    with open(final_path, 'w') as out_file:
      for header in headers[0:max_column_index]:
        out_file.write(str(header) + ';')
      out_file.write('\n')
      row = 0
      for record in reader:
        row += 1
        for item in record[0:max_column_index]:
          out_file.write(str(item) + ';')
        out_file.write('\n')
        # Progress feedback for large reports.
        if row == 1:
          print('Row 1 written')
        if row % 10000 == 0:
          print('Row ' + str(row) + ' written')

  # Two 30-second polling ticks per minute; '//' keeps integer division
  # explicit (same result as Python 2's '/' on ints).
  minutes = timer // 2

  print('Report completed in ' + str(minutes) + ' minutes')

  with open('success.log', 'a') as log:
    # Fixed typo in the log message: 'downladed' -> 'downloaded'.
    log.write('Report id %s downloaded in %s minutes, %s\n'
              % (report_job['id'], minutes, query))
