#!/bin/env python

# Author : zhandongyun

import os
import sys
import subprocess
import time
import datetime
import dateutil
import argparse
import re

# When True, remote commands are stubbed out so the script can be dry-run.
_TESTING_ = False

# Indices into the per-role function tuples (see default_funcs below).
ACTION_START = 0
ACTION_STOP = 1
ACTION_CHECK = 2
ACTION_UPDATE = 3


# Pseudo-role meaning "every instance on the machine".
SERVER = "whole_server"

# Role names as they appear in the cluster topology config.
SCHEDULER = "i_scheduler"
DOWNLOADER = "i_downloader"
DATA_SAVER = "i_data_saver"
EXTRACTOR = "i_extractor"
ENTITY_EXTRACTOR = "i_entity_extractor"
CRAWLER_MERGE = "i_crawler_merge"

# Two-letter abbreviations accepted on the command line (-T/--type).
SCHEDULER_SHORT = "SC"
DOWNLOADER_SHORT = "DL"
DATA_SAVER_SHORT = "DS"
EXTRACTOR_SHORT = "EX"
ENTITY_EXTRACTOR_SHORT = "EE"
CRAWLER_MERGE_SHORT = "CM"

# Abbreviation -> full role name; also supplies the default for --type.
short_map = {
    SCHEDULER_SHORT : SCHEDULER,
    DOWNLOADER_SHORT : DOWNLOADER,
    DATA_SAVER_SHORT : DATA_SAVER,
    EXTRACTOR_SHORT : EXTRACTOR,
    ENTITY_EXTRACTOR_SHORT : ENTITY_EXTRACTOR,
    CRAWLER_MERGE_SHORT : CRAWLER_MERGE
}

def run_cmd(cmd):
    """Run cmd in a local shell and return its combined stdout+stderr output.

    Returns '' immediately when _TESTING_ is set (dry-run mode).
    """
    if _TESTING_:
        return ''
    # NOTE(review): shell=True with an interpolated command string -- callers
    # must only pass trusted input (hosts/paths come from the conf file).
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Accumulate lines and join once at the end: the previous `res += line`
    # loop was quadratic in the size of the output.
    lines = []
    while True:
        line = p.stdout.readline()
        if not line:
            break
        lines.append(line)
        sys.stdout.flush()
    p.wait()
    return ''.join(lines)

def run_on_remote_machine(cmd, ip):
    """Run cmd on host ip via ssh and return the captured output.

    In dry-run mode, 'ps ...' commands return a fake pid so liveness checks
    succeed; everything else falls through to run_cmd (which is a no-op then).
    """
    if _TESTING_ and cmd.startswith('ps'):
        return '1234567'
    return run_cmd('ssh %s "%s"' % (ip, cmd))

def run_on_machines(cmd, iplist):
    """Execute cmd over ssh on every host in iplist, in order."""
    for host in iplist:
        run_on_remote_machine(cmd, host)

def deploy_files_to_remote_machine(rel_path, remote_path, ip):
    """Recursively scp the local rel_path to remote_path on host ip."""
    copy_cmd = 'scp -r %s %s:%s' % (rel_path, ip, remote_path)
    run_cmd(copy_cmd)

def deploy_files_to_machines(rel_path, remote_path, machines):
    """Copy rel_path to remote_path on each host in machines."""
    for host in machines:
        deploy_files_to_remote_machine(rel_path, remote_path, host)

topo = None

def default_start_func(server, basepath, role, conf):
    if default_check_alive_func(server, basepath, role, conf):
        print "Already running, skip"
        return True
    print "Starting " + role + " on " + server + " with conf " + basepath + "/" + conf
    run_on_remote_machine("cd " + basepath + "/" + role + "; /bin/sh start.sh " + conf, server)
    if not default_check_alive_func(server, basepath, role, conf):
        print "ERROR: Manual checking required"
        sys.exit(1)
    return True


def default_stop_func(server, basepath, role, conf):
    global _TESTING_
    print "Stoping " + role + " on " + server + " with conf " + basepath + "/" + conf
    pid = run_on_remote_machine("ps ux | grep '%s' | grep -v grep | awk '{print $2}'" % conf, server).strip()
    if '' == pid:
        print "This server is not running"
    else:
        run_on_remote_machine("kill " + pid, server)
        while default_check_alive_func(server, basepath, role, conf):
            print "Waiting for server to die..."
            if _TESTING_:
                break
    return True

def default_check_alive_func(server, basepath, role, conf):
    # print "Checking " + role + " on " + server + " with conf " + basepath + "/" + conf
    pid = run_on_remote_machine("ps ux | grep '%s' | grep -v grep | awk '{print $2}'" % conf, server).strip()
    if pid == "":
        print role + " on " + server + " with conf " + basepath + "/" + conf + " is not running"
        return False
    else:
        print role + " on " + server + " with conf " + basepath + "/" + conf + " is running"
        return True

def default_update_func(server, basepath):
    print "Updating on " + server + " with path " + basepath
    res = run_on_remote_machine("cd " + basepath + "; rm bdp -rf", server).strip()
    res = run_on_remote_machine("cd " + basepath + "; git pull 1>/dev/null", server).strip()
    if res != "":
        return False
    else:
        run_on_remote_machine("cd " + basepath + "; ./build_thrift_all.sh", server).strip()
        return True

# One (start, stop, check, update) handler tuple shared by all roles; indexed
# by the ACTION_* constants.
default_funcs = (default_start_func, default_stop_func, default_check_alive_func, default_update_func)
# Role name -> per-action handler tuple, consumed by call_on_instances_of_server.
# NOTE(review): SCHEDULER is absent here even though SC is in short_map, so a
# scheduler instance in the topology would raise KeyError -- confirm whether
# that omission is intentional.
role_func_map = {
    DOWNLOADER : default_funcs,
    DATA_SAVER : default_funcs,
    EXTRACTOR : default_funcs,
    ENTITY_EXTRACTOR : default_funcs,
    CRAWLER_MERGE : default_funcs,
}

def update(server):
    """Pull code on the host described by one topo entry (hostname, (base_path, ...))."""
    host = server[0]
    base_path = server[1][0]
    return default_update_func(host, base_path)

def call_on_instances_of_server(server_def, action, func_map, roles_to_manipulate):
    """Invoke the action-indexed handler for each selected instance on one server.

    server_def has the shape (hostname, (base_path, [(role, conf), ...])).
    Instances whose role is not in roles_to_manipulate are skipped.
    Returns True only if every invoked handler returned truthy.
    """
    host = server_def[0]
    base_path = server_def[1][0]
    instances = server_def[1][1]
    ok = True
    for instance in instances:
        role, conf = instance[0], instance[1]
        if role not in roles_to_manipulate:
            continue
        handler = func_map[role][action]
        ok &= handler(host, base_path, role, conf)
    return ok

def load_file(path):
    try:
        execfile(path, globals(), globals())
    except Exception, e:
        print e
        print 'load file failed', path


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('conf', help='config file to use')
    parser.add_argument('-s', '--start', help='start cluster', action='store_true', dest='start', default=False)
    parser.add_argument('-t', '--stop', help='stop cluster', action='store_true', dest='stop', default=False)
    parser.add_argument('-u', '--update', help='pull code from git', action='store_true', dest='update', default=False)
    parser.add_argument('-c', '--check', help='check cluster running', action='store_true', dest='check', default=False)
    parser.add_argument('-T', '--type', help='types of servers to manipulate: ' +
                        ','.join(short_map.keys()) + ' stands for ' +
                        ','.join(short_map.values()) + ' respectively. Multiple types are seperated by ",". '
                                                       'By default, all types of servers are manipulated together.'
                                                       'Note that the code cannot be updated separately. If you'
                                                       'specified the "update" action, the code is pulled together.',
                        dest='types', default=','.join(short_map.keys()))
    args = parser.parse_args()
    if args.conf is None:
        print "Conf file not specified"
        sys.exit(1)
    load_file(args.conf)
    if not globals().has_key('topo') or globals()['topo'] is None:
        print "Invalid config file, cluster topology not defined"
        sys.exit(1)
    topo = globals()['topo']
    if _TESTING_:
        print topo
    if not args.start and not args.stop and not args.update and not args.check:
        print "No action specified"
    else:
        types = args.types.split(',')
        for t in types:
            if t not in short_map.keys():
                print "Invalid server type specified : " + t
        real_types = map(lambda x : short_map[x], types)
        if _TESTING_:
            print types
            print real_types
        for server_def in topo:
            if args.stop:
                res = call_on_instances_of_server(server_def, ACTION_STOP, role_func_map, real_types)
                if not res:
                    print "Cluster startup failed"
                    sys.exit(1)
            if args.update:
                res = update(server_def)
                if not res:
                    print "Cluster startup failed"
                    sys.exit(1)
            if args.start:
                res = call_on_instances_of_server(server_def, ACTION_START, role_func_map, real_types)
                if not res:
                    print "Cluster startup failed"
                    sys.exit(1)
            if args.check:
                res = call_on_instances_of_server(server_def, ACTION_CHECK, role_func_map, real_types)
                if not res:
                    print "Cluster startup failed"
                    sys.exit(1)

        print "Cluster operation success"

