#!/usr/bin/env python2
##################################################
#
# Copyright CloudSecurityAlliance 2010 (www.cloudsecurityalliance.org)
# Author: Kurt Seifried, kseifried@cloudsecurityalliance.org
#
##################################################
#
# License: GPLv3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# 
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##################################################
#
# Program flow:
#
# This program basically automates the task of:
#
# Create a CSV file and write a top line to it
#
# logging in to Google apps
# getting the groups listing page, put all the URLs to the group pages into an array
# if there is a Next link (e.g. more than 30 groups) click on it and put all the URLs to the group pages into an array
# continue clicking on next and putting URLs into the array until there isn't a Next link
#
# We now have an array with all the URLs of our Google groups.
#
# Load the first URL, get all the users, write them to the CSV file, if there is a Next link (e.g. more than 30 users in the group) click it,
# repeat until there is no Next link. Get another link from the URL array and repeat until there are no more URLs
#
# Then we upload the document to a specified Google Documents folder in the form of filename-date.csv
#
##################################################
#
# TODO (kseifried@cloudsecurityalliance.org) add google docs upload support based on date http://code.google.com/p/googlecl/
# TODO (kseifried@cloudsecurityalliance.org) figure out how to better extract the table rows
# TODO (kseifried@cloudsecurityalliance.org) Cleanup variable names and logic
# TODO (kseifried@cloudsecurityalliance.org) add threading support (one per group?)
#
##################################################

from mechanize import Browser
import lxml.html
import csv
import re

##################################################

class GoogleGroupBrowser():
    """Thin wrapper around a mechanize Browser that logs in to the
    Google Apps control panel and then fetches pages as that user."""

    def __init__(self, username, password, login_url):
        """Log in to Google Apps.

        Opens login_url, fills in the first form on the page (the
        Google login form) with the given credentials and submits it.
        The logged-in browser is kept on self.browser for later calls.

        username  -- admin user name (goes into the 'Email' field)
        password  -- matching password (goes into the 'Passwd' field)
        login_url -- control-panel login URL for the domain
        """
        self.browser = Browser()
        self.browser.open(login_url)
        # select the first form (login form), enter details and submit
        self.browser.select_form(nr=0)
        self.browser['Email'] = username
        self.browser['Passwd'] = password
        self.browser.submit()

    def get_page(self, url):
        """Fetch url with the logged-in browser and return the raw body."""
        self.browser.open(url)
        return self.browser.response().read()

def _collect_group_urls(web_client, group_list_url, login_url, domain_name):
    """Walk the paginated GroupList pages and return a list of the
    per-group page URLs found on them.

    Starts at group_list_url and follows the "Next" link (a link back
    to .../GroupList?...) until no such link is present.
    """
    group_page_regex = re.escape(
        "https://www.google.com/a/cpanel/" + domain_name + "/Group?groupId=")
    next_page_regex = re.escape(
        "https://www.google.com/a/cpanel/" + domain_name + "/GroupList?")
    group_urls = []
    while group_list_url != "":
        page = web_client.get_page(group_list_url)
        # assume this is the last page until a "Next" link proves otherwise
        group_list_url = ""
        doc = lxml.html.fromstring(page)
        doc.make_links_absolute(login_url)
        for element, attribute, link, pos in doc.iterlinks():
            if re.match(group_page_regex, link):
                # a link to an individual group page
                group_urls.append(link)
            elif re.match(next_page_regex, link):
                # the "Next" link occurs twice on the page, so it is
                # harmless to overwrite group_list_url twice
                group_list_url = link
    return group_urls


def _write_group_members(web_client, group_url, login_url, domain_name,
                         csv_writer):
    """Walk the (possibly paginated) member listing of one group and
    write one CSV row per member/owner: [list name, user name, email, role].
    """
    # the list name is whatever follows .../Group?groupId= in the URL
    list_name = re.sub(
        re.escape("https://www.google.com/a/cpanel/" + domain_name +
                  "/Group?groupId="), "", group_url)
    while group_url != "":
        page = web_client.get_page(group_url)
        # the "Next" link on a group page starts with the group page URL
        next_page_regex = re.escape(group_url)
        # assume this is the last page until a "Next" link proves otherwise
        group_url = ""
        doc = lxml.html.fromstring(page)
        doc.make_links_absolute(login_url)
        for element, attribute, link, pos in doc.iterlinks():
            # the "Next" link occurs twice on the page, so it is
            # harmless to overwrite group_url twice
            if re.match(next_page_regex, link):
                group_url = link
        # each member is one table row whose text ends in Member or Owner
        for table_row in doc.cssselect('tr'):
            # text_content() may contain unicode; the csv module
            # (Python 2) needs encoded bytes
            row_text = table_row.text_content().encode('utf-8')
            # remove one leading and one trailing line return
            row_text = re.sub('^\n', '', row_text)
            row_text = re.sub('\n$', '', row_text)
            if re.search("(Member|Owner)$", row_text):
                row = re.split('\n', row_text)
                row.insert(0, list_name)
                csv_writer.writerow(row)


def main():
    """Export the membership of every Google Apps group in the domain
    to a CSV file with columns: list name, user name, email, role.
    """
    #
    # Set these to whatever you need them to be
    #
    DOMAIN_NAME = "example.org"
    USERNAME = "adminuser"
    PASSWORD = "password"
    OUTPUT_DIR = "/home/adminuser/tmp/"
    OUTPUT_FILE = "output_data.csv"

    OUTPUT_CSV = OUTPUT_DIR + OUTPUT_FILE

    LOGIN_URL = "https://www.google.com/a/cpanel/" + DOMAIN_NAME + "/"
    GROUP_LIST_PAGE = "https://www.google.com/a/cpanel/" + DOMAIN_NAME + "/GroupList"

    # 'wb' because the Python 2 csv module wants a binary-mode file;
    # keep the handle so it can be closed explicitly (the original
    # leaked it and relied on interpreter shutdown to flush)
    output_file = open(OUTPUT_CSV, 'wb')
    try:
        csv_writer = csv.writer(output_file, delimiter=',', quotechar='"',
                                quoting=csv.QUOTE_ALL)
        csv_writer.writerow(["list name", "user name", "email", "role"])

        web_client = GoogleGroupBrowser(USERNAME, PASSWORD, LOGIN_URL)

        # get all the group page URLs, then dump each group's members
        for group_url in _collect_group_urls(web_client, GROUP_LIST_PAGE,
                                             LOGIN_URL, DOMAIN_NAME):
            _write_group_members(web_client, group_url, LOGIN_URL,
                                 DOMAIN_NAME, csv_writer)
    finally:
        output_file.close()


# run the export only when executed as a script, not when imported
if __name__ == '__main__':
    main()

