#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import sys
import time
import logging
import logging.handlers
from optparse import OptionParser

from htlib import *
from htestcase import *

"""
About read mode:
    1. step
    2. rmode

"""

class HdfsTestCase(HTestCase, HTestEnv):
    """
    Base class for HDFS I/O test cases.

    Subclasses override init() to set their parameters (tno, fsize,
    concurrency, expire, ...) and run() to drive the test stages.
    Each stage shells out to the TestIO map-reduce job in htest.jar
    and synchronizes all participating clients with barriers.
    """
    def __init__(self, interface, home_dir, cluster_name):
        HTestCase.__init__(self, interface, home_dir, cluster_name)
        HTestEnv.__init__(self, home_dir, cluster_name)

        # Defaults; subclasses customize these in init().
        self.tno    = ''            # test-case id, used in paths and log names
        self.prefix = 'default'     # result sub-directory prefix
        self.report = 'False'       # per-stage report flag passed to TestIO
        self.round  = 3             # rounds per (fsize, concurrency) pair
        self.total_write_bytes = 0  # 0 means no total-byte limit

        self.uid = os.getuid()

        # Let the subclass set tno & friends before the log name is derived.
        self.init()

        nfs_mkdirs('%s/logs' % self.sdir)

        LOG_FILENAME = '%s/logs/%s-%s.log' % (self.sdir, self.tno, self.client_no)
        self.mylogger = logging.getLogger('MyLogger')
        self.mylogger.setLevel(logging.DEBUG)
        handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=100*1024*1024, backupCount=5)
        self.mylogger.addHandler(handler)

        # Quick smoke-test configuration: one small, short round only.
        if options.self_test:
            self.fsize       = (1*1024*1024*1024, )
            self.concurrency = (8,)
            self.expire      = 30
            self.round       = 1

    def init(self):
        """
        Virtual function: subclasses set their test parameters here.
        Called from __init__ before the logger is configured.
        """
        pass

    def __repr__(self):
        # One-line summary used in the per-test log file.
        ret = ''
        ret += '%s <%s> : [home_dir %s, cluster_name %s, dnum %d, cnum %d, fsize %s, con %s, round %d]' % (
                self.tno,
                self.client_no,
                self.home_dir,
                self.cluster_name,
                self.dnum,
                self.cnum,
                self.fsize,
                self.concurrency,
                self.round)
        return ret

    def clean_hdp_data(self):
        """Remove this test's HDFS data; only one client does the work."""
        self.barrier.barrier()
        if self.lock():
            log_out(" clear hadoop data begin...\n")
            command = '%s/bin/hadoop fs -rmr /user/%s/%s' % (self.hadoop, self.uid, self.tno)
            os.system(command)
            log_out(" clear hadoop data finished...\n")
            self.unlock()
        self.barrier.barrier()

    def format_hdfs(self, restart=True):
        """
        Format HDFS from exactly one client; all clients wait at barriers.

        @param restart: After format, restart HDFS or not.
        """
        self.barrier.barrier()
        if self.lock():
            log_out('******Format %s******\n' % self.cluster_name)
            self.mylogger.info('%s: format hdfs begin\n' % (time.ctime()))
            self.format_hdp(restart)
            self.mylogger.info('%s: format hdfs end\n' % (time.ctime()))
            self.unlock()
        self.barrier.barrier()

    def restart_hdfs(self):
        """Restart HDFS from exactly one client."""
        self.barrier.barrier()
        if self.lock():
            log_out('******Start %s******\n' % self.cluster_name)
            self.restart_hdp()
            self.unlock()
        self.barrier.barrier()

    def stop_hdfs(self):
        """Stop HDFS from exactly one client."""
        self.barrier.barrier()
        if self.lock():
            log_out('******Stop %s******\n' % self.cluster_name)
            self.stop_hdp()
            self.unlock()
        self.barrier.barrier()

    def one_stage(self, args):
        """
        Run one test stage, synchronized across clients.

        Step:
            1. write or read (TestIO job)
            2. report (datanode disk space only)

        @param args: dict with keys rno, stage, w, fsize, r, step, rsrc, rmode.
        """
        log_out('Stage %d begin...\n' % (args['stage']))
        self.mylogger.info('%s: begin %s' % (time.ctime(), args))

        if args['w'] != 0:
            # Writers need the result directory to exist up front.
            dir = '%s/%s/%s/d%d/c%d/f%d/con%d/r%d' % (self.sdir, self.tno, self.prefix, self.dnum, self.cnum, args['fsize'], args['w'], args['rno'])
            nfs_mkdirs(dir)

        self.barrier.barrier()

        for i in xrange(2):
            if i == 0:
                self.report = 'False'
            elif i == 1:
                # Second pass ONLY reports datanode disk space.
                self.report = 'only_report'

            command = None
            try:
                command = """%s/bin/hadoop jar %s/../hadoop-test/htest.jar TestIO -sdir %s -tno %s -prefix %s -dnum %d -cnum %d -totalbyte %d -expire %d -cno %s -report %s \
                        -rno %d -stage %d -fsize %d -w %d -r %d -step %d -rsrc %s -rmode %d""" \
                        % (self.hadoop, self.hadoop,
                                self.sdir,
                                self.tno,
                                self.prefix,
                                self.dnum,
                                self.cnum,
                                self.total_write_bytes,
                                self.expire,
                                self.client_no,
                                self.report,
                                args['rno'],
                                args['stage'],
                                args['fsize'],
                                args['w'],
                                args['r'],
                                args['step'],
                                args['rsrc'],
                                args['rmode'])
            except KeyError as e:
                # BUG FIX: KeyError has no strerror(); log the missing key
                # itself instead of crashing with an AttributeError.
                log_err(' args error: %s\n' % e)

            if command is not None:
                sys.stdout.write(command + '\n')
                try:
                    os.system(command)
                except Exception:
                    log_err('%s **********error*********\n' % command)

            # Always hit the barrier, even on a bad args dict, so the
            # other clients cannot hang waiting for us.
            self.barrier.barrier()

        log_out('Stage %d end...\n' % (args['stage']))
        self.mylogger.info('%s: end %s' % (time.ctime(), args))

class dfs_simple_writer(HdfsTestCase):
    """
    Test 4: simple write workload.

    @precondition HDFS is already formatted/started by the tester.
    """
    def __init__(self, interface, home_dir, cluster_name):
        HdfsTestCase.__init__(self, interface, home_dir, cluster_name)

    def init(self):
        """Write 1 GB files at a fixed concurrency of 4."""
        self.tno         = 'tc7.2.0-dfs-simple'
        self.fsize       = [1024*1024*x for x in (1024, )]
        self.concurrency = (4, )
        self.expire      = 600
        self.round       = 1

    def prepare(self):
        # Deliberately no format/restart here: the cluster is prepared by
        # the tester to keep this test simple.
        pass

    def run(self):
        for size in self.fsize:
            for writers in self.concurrency:
                for rnd in xrange(self.round):
                    # Writes are slower than reads, so scale the timeout
                    # up for the write stage and restore it afterwards.
                    self.expire = self.expire * options.wr_ratio
                    self.one_stage({'rno': rnd, 'stage': 1, 'w': writers,
                                    'fsize': size, 'r': 0, 'step': 0,
                                    'rsrc': 'null', 'rmode': -1})
                    self.expire = self.expire / options.wr_ratio

class dfs_simple_reader(HdfsTestCase):
    """
    Test 4: simple read workload.

    Reads back the data written by dfs_simple_writer; write_concur must be
    identical to the concurrency dfs_simple_writer used (default is 4).
    @precondition dfs_simple_writer already ran against this cluster.
    """
    def __init__(self, interface, home_dir, cluster_name):
        HdfsTestCase.__init__(self, interface, home_dir, cluster_name)

    def init(self):
        """Read the 1 GB files at several read concurrencies."""
        self.tno         = 'tc7.2.0-dfs-simple'
        self.fsize       = [1024*1024*x for x in (1024, )]
        self.concurrency = (4, 8, 16, 32, )
        self.expire      = 120
        self.write_concur= 4

    def prepare(self):
        # Do not format: HDFS still holds the data we want to read.
        pass

    def run(self):
        source = '%d,1' % self.write_concur
        for rnd in xrange(self.round):
            for size in self.fsize:
                for readers in self.concurrency:
                    # Stage 2: read with step 3.
                    self.one_stage({'rno': rnd, 'stage': 2, 'w': 0,
                                    'fsize': size, 'r': readers, 'step': 3,
                                    'rsrc': source, 'rmode': -1})
                    # Stage 3: read with step 0, rmode 0.
                    self.one_stage({'rno': rnd, 'stage': 3, 'w': 0,
                                    'fsize': size, 'r': readers, 'step': 0,
                                    'rsrc': source, 'rmode': 0})

class dfs_performance(HdfsTestCase):
    """
    Test 4: write/read performance sweep over several concurrencies.

    @precondition cluster is reachable; it is formatted before each round.
    """
    def __init__(self, interface, home_dir, cluster_name):
        HdfsTestCase.__init__(self, interface, home_dir, cluster_name)

    def init(self):
        """1 GB files, concurrency swept from 1 up to 32."""
        self.tno         = 'tc7.2.7-dfs-performance'
        self.fsize       = [1024*1024*x for x in (1024, )]
        self.concurrency = (1, 4, 16, 32)
        self.expire      = 600

    def prepare(self):
        self.format_hdfs()

    def run(self):
        for size in self.fsize:
            for concur in self.concurrency:
                for rnd in xrange(self.round):
                    source = '%d,1' % concur

                    # Stage 1: write, timeout scaled up by wr_ratio.
                    self.expire = self.expire * options.wr_ratio
                    self.one_stage({'rno': rnd, 'stage': 1, 'w': concur,
                                    'fsize': size, 'r': 0, 'step': 0,
                                    'rsrc': 'null', 'rmode': -1})
                    self.expire = self.expire / options.wr_ratio

                    # Stage 2: read with step 3.
                    self.one_stage({'rno': rnd, 'stage': 2, 'w': 0,
                                    'fsize': size, 'r': concur, 'step': 3,
                                    'rsrc': source, 'rmode': -1})

                    # Stage 3: read with step 0.
                    self.one_stage({'rno': rnd, 'stage': 3, 'w': 0,
                                    'fsize': size, 'r': concur, 'step': 0,
                                    'rsrc': source, 'rmode': -1})

                    # Start every round from a freshly formatted cluster.
                    self.format_hdfs()

class dfs_scalability(HdfsTestCase):
    """
    Test 5: scalability run across cluster sizes [8, 16, 32, 64, 128].

    Four stages per round: write, read, read again with step 0, and a
    combined read+write stage. (Historically the cluster was picked from a
    list via an -m/--minor option; now the cluster name is passed in.)
    """
    def __init__(self, interface, home_dir, cluster_name):
        HdfsTestCase.__init__(self, interface, home_dir, cluster_name)

    def init(self):
        """1 GB files at a fixed concurrency of 16."""
        self.tno         = 'tc7.2.7-dfs-scalability'
        self.fsize       = [1024*1024*x for x in (1024, )]
        self.concurrency = (16,)
        self.expire      = 600

    def prepare(self):
        self.format_hdfs()

    def run(self):
        for size in self.fsize:
            for concur in self.concurrency:
                for rnd in xrange(self.round):
                    source = '%d,1' % concur

                    # Stage 1: write, timeout scaled up by wr_ratio.
                    self.expire = self.expire * options.wr_ratio
                    self.one_stage({'rno': rnd, 'stage': 1, 'w': concur,
                                    'fsize': size, 'r': 0, 'step': 0,
                                    'rsrc': 'null', 'rmode': -1})
                    self.expire = self.expire / options.wr_ratio

                    # Stage 2: read with step 3.
                    self.one_stage({'rno': rnd, 'stage': 2, 'w': 0,
                                    'fsize': size, 'r': concur, 'step': 3,
                                    'rsrc': source, 'rmode': -1})

                    # Stage 3: read with step 0.
                    self.one_stage({'rno': rnd, 'stage': 3, 'w': 0,
                                    'fsize': size, 'r': concur, 'step': 0,
                                    'rsrc': source, 'rmode': -1})

                    # Stage 4: simultaneous read and write.
                    self.one_stage({'rno': rnd, 'stage': 4, 'w': concur,
                                    'fsize': size, 'r': concur, 'step': 0,
                                    'rsrc': source, 'rmode': -1})

                    # Start every round from a freshly formatted cluster.
                    self.format_hdfs()

class dfs_namenode_handler(HdfsTestCase):
    """
    Test tc7.10.3: sweep dfs.namenode.handler.count (10, 20, 40) and run a
    write/read/mixed cycle under each setting.  The original value is saved
    in prepare() and restored in finish().
    """
    def __init__(self, interface, home_dir, cluster_name):
        HdfsTestCase.__init__(self, interface, home_dir, cluster_name)

    def init(self):
        """1 GB files, concurrency swept from 1 to 16."""
        self.tno         = 'tc7.10.3-nnh'
        self.fsize       = [1024*1024*x for x in (1024,)]
        self.concurrency = (1, 4, 16)
        self.expire      = 300

    def setup(self, nnh):
        """Change dfs.namenode.handler.count to nnh and re-format HDFS."""
        self.prefix = 'nnh%d' % nnh

        self.barrier.barrier()
        if self.lock():
            log_out('******change dfs.namenode.handler.count to %d******\n' % nnh)
            self.hdfs_parser.set('dfs.namenode.handler.count', '%d' % nnh)
            self.mylogger.info('%s: change dfs.namenode.handler.count to %d\n' % (time.ctime(), nnh))
            self.mylogger.info('%s: current cluster config: %s\n' % (time.ctime(), self.cluster))

            # Give the config change a moment to propagate before formatting.
            time.sleep(6)
            self.format_hdp()
            self.unlock()
        self.barrier.barrier()

    def prepare(self):
        # Remember the original setting so finish() can restore it.
        self.old_nnh = self.hdfs_parser.get('dfs.namenode.handler.count')
        self.mylogger.info('%s: current cluster config: %s\n' % (time.ctime(), self.cluster))

        self.format_hdfs()

    def finish(self):
        """Restore dfs.namenode.handler.count to its pre-test value."""
        if self.lock():
            log_out('******change dfs.namenode.handler.count to old value %s******\n' % self.old_nnh)
            # BUG FIX: was self.hparser, which does not match the
            # self.hdfs_parser attribute used everywhere else in this class
            # (see setup()/prepare()); the restore would have failed.
            self.hdfs_parser.set('dfs.namenode.handler.count', self.old_nnh)

            self.mylogger.info('%s: restore dfs.namenode.handler.count to %s\n' % (time.ctime(), self.old_nnh))
            self.mylogger.info('%s: current cluster config: %s\n' % (time.ctime(), self.cluster))
            self.unlock()

    def run(self):
        namenode_handler_list = (10, 20, 40)
        for nnh in namenode_handler_list:
            self.setup(nnh)
            for s in self.fsize:
                for c in self.concurrency:
                    for r in xrange(self.round): # 3 rounds by default
                        # Stage 1: write, timeout scaled up by wr_ratio.
                        self.expire = self.expire * options.wr_ratio
                        params = {'rno':r, 'stage':1, 'w':c, 'fsize':s, 'r':0, 'step':0, 'rsrc':'null',   'rmode':-1}
                        self.one_stage(params)

                        self.expire = self.expire / options.wr_ratio

                        # Stage 2: read back what stage 1 wrote.
                        params = {'rno':r, 'stage':2, 'w':0, 'fsize':s, 'r':c, 'step':0, 'rsrc':'%d,1'%c, 'rmode':-1}
                        self.one_stage(params)

                        # Stage 3: simultaneous read and write.
                        params = {'rno':r, 'stage':3, 'w':c, 'fsize':s, 'r':c, 'step':0, 'rsrc':'%d,1'%c, 'rmode':-1}
                        self.one_stage(params)

                        self.format_hdfs()

class dfs_readmode(HdfsTestCase):
    """
    Test tc7.10.4: write a data set once, then read it back under a series
    of step/rmode combinations (see the module note about read mode).
    """
    def __init__(self, interface, home_dir, cluster_name):
        HdfsTestCase.__init__(self, interface, home_dir, cluster_name)

    def init(self):
        """64 MB files, bounded by a 10 GB total write budget."""
        self.tno         = 'tc7.10.4-readmode'
        self.fsize       = [1024*1024*x for x in (64, )]
        self.concurrency = (1, 4, 16)
        self.expire      = 600
        self.total_write_bytes = 10*1024*1024*1024

    def prepare(self):
        self.format_hdfs()

    def run(self):
        # Phase 1: write the data set for every (fsize, concurrency, round).
        for size in self.fsize:
            for concur in self.concurrency:
                for rnd in xrange(self.round):
                    self.one_stage({'rno': rnd, 'stage': 1, 'w': concur,
                                    'fsize': size, 'r': 0, 'step': 0,
                                    'rsrc': 'null', 'rmode': -1})

        # Phase 2: read it back; one stage per (stage, step, rmode) combo.
        read_plan = (
            (2,  0,  -1),
            (3, -2,  -1),
            (4, -4,  -1),
            (5,  0,  -1),
            (6,  0,   0),
            (7,  0,  50),
            (8,  0, 100),
        )
        for size in self.fsize:
            for concur in self.concurrency:
                for rnd in xrange(self.round):
                    for stage, step, rmode in read_plan:
                        self.one_stage({'rno': rnd, 'stage': stage, 'w': 0,
                                        'fsize': size, 'r': concur,
                                        'step': step, 'rsrc': '%d,1' % concur,
                                        'rmode': rmode})

class dfs_blocksize(HdfsTestCase):
    """
    Test tc7.10.5: sweep dfs.block.size (32/64/128/256 MB) and run a
    write/read/mixed cycle under each setting.  The original value is saved
    in prepare() and restored in finish().
    """
    def __init__(self, interface, home_dir, cluster_name):
        HdfsTestCase.__init__(self, interface, home_dir, cluster_name)

    def init(self):
        """1 GB files, concurrency swept from 1 to 16."""
        self.tno         = 'tc7.10.5-blocksize'
        self.fsize       = [1024*1024*x for x in (1024, )]
        self.concurrency = (1, 4, 16)
        self.expire      = 300

    def setup(self, blocksize):
        """Change dfs.block.size to blocksize and re-format HDFS."""
        self.prefix = 'blocksize_%s' % blocksize

        self.barrier.barrier()
        if self.lock():
            log_out('******change dfs.block.size to %d******\n' % blocksize)
            self.hdfs_parser.set('dfs.block.size', '%d' % blocksize)
            self.mylogger.info('change dfs.block.size to %d\n' % blocksize)
            self.mylogger.info('%s: current cluster config: %s\n' % (time.ctime(), self.cluster))

            # Give the config change a moment to propagate before formatting.
            time.sleep(6)
            self.format_hdp()
            self.unlock()
        self.barrier.barrier()

    def prepare(self):
        # Remember the original setting so finish() can restore it.
        self.old_blocksize = self.hdfs_parser.get('dfs.block.size')
        self.mylogger.info('%s: current cluster config: %s\n' % (time.ctime(), self.cluster))

        self.format_hdfs()

    def finish(self):
        """Restore dfs.block.size to its pre-test value."""
        if self.lock():
            log_out('******change dfs.block.size to old value %s******\n' % self.old_blocksize)
            # BUG FIX: was self.hparser, which does not match the
            # self.hdfs_parser attribute used everywhere else in this class
            # (see setup()/prepare()); the restore would have failed.
            self.hdfs_parser.set('dfs.block.size', self.old_blocksize)
            self.mylogger.info('%s: restore dfs.block.size to %s\n' % (time.ctime(), self.old_blocksize))
            self.mylogger.info('%s: current cluster config: %s\n' % (time.ctime(), self.cluster))
            self.unlock()

    def run(self):
        blocksize_list = [1024*1024*bs for bs in (32, 64, 128, 256)]
        for bs in blocksize_list:
            self.setup(bs)
            for s in self.fsize:
                for c in self.concurrency:
                    for r in xrange(self.round): # 3 rounds by default
                        # Stage 1: write, timeout scaled up by wr_ratio.
                        self.expire = self.expire * options.wr_ratio
                        params = {'rno':r, 'stage':1, 'w':c, 'fsize':s, 'r':0, 'step':0, 'rsrc':'null',   'rmode':-1}
                        self.one_stage(params)

                        self.expire = self.expire / options.wr_ratio

                        # Stage 2: read back what stage 1 wrote.
                        params = {'rno':r, 'stage':2, 'w':0, 'fsize':s, 'r':c, 'step':0, 'rsrc':'%d,1'%c, 'rmode':-1}
                        self.one_stage(params)

                        # Stage 3: simultaneous read and write.
                        params = {'rno':r, 'stage':3, 'w':c, 'fsize':s, 'r':c, 'step':0, 'rsrc':'%d,1'%c, 'rmode':-1}
                        self.one_stage(params)

                        self.format_hdfs()

class dfs_template(HdfsTestCase):
    """
    Template test case: one write/read/mixed cycle per (step, rmode) combo.
    Copy this class and adjust step_list / rmode_list to build new tests.
    """
    def __init__(self, interface, home_dir, cluster_name):
        HdfsTestCase.__init__(self, interface, home_dir, cluster_name)

    def init(self):
        """
        BUG FIX: these attributes were previously assigned in __init__
        AFTER HdfsTestCase.__init__ returned.  The base constructor calls
        self.init() and then derives the log filename from self.tno, so the
        log file was named with an empty tno; the late assignments also
        clobbered the -T/--selftest overrides.  Setting them here matches
        every other test class and fixes both problems.
        """
        self.tno         = 'template'
        self.fsize       = [1024*1024*x for x in (1024, )]
        self.concurrency = (1, 4, 16)
        self.expire      = 300

    def setup(self, stp, rmode):
        """Name the result prefix after the current step/rmode combo."""
        self.prefix = 's%d_r%d' % (stp, rmode)

    def prepare(self):
        """
        @override
        """
        self.format_hdfs()

    def run(self):
        step_list = (-3, )
        rmode_list = (-1, )

        for stp in step_list:
            for rmode in rmode_list:
                for s in self.fsize:
                    self.setup(stp, rmode)
                    for c in self.concurrency:
                        for r in xrange(self.round): # 3 rounds by default
                            # Stage 1: write, timeout scaled up by wr_ratio.
                            self.expire = self.expire * options.wr_ratio
                            params = {'rno':r, 'stage':1, 'w':c, 'fsize':s, 'r':0, 'step':0,   'rsrc':'null',   'rmode':0}
                            self.one_stage(params)

                            self.expire = self.expire / options.wr_ratio

                            # Stage 2: read under the current step/rmode.
                            params = {'rno':r, 'stage':2, 'w':0, 'fsize':s, 'r':c, 'step':stp, 'rsrc':'%d,1'%c, 'rmode':rmode}
                            self.one_stage(params)

                            # Stage 3: simultaneous read and write.
                            params = {'rno':r, 'stage':3, 'w':c, 'fsize':s, 'r':c, 'step':stp, 'rsrc':'%d,1'%c, 'rmode':rmode}
                            self.one_stage(params)

                            self.format_hdfs()

if __name__ == "__main__":
    parser = OptionParser()
    parser.add_option("-i", "--interface", dest="interface",   type='string', help="network interface", default="eth0")
    parser.add_option("-H", "--home_dir",  dest="home_dir",    type='string', help="htest home dir",    default="/share/home/testhdp")
    parser.add_option("-c", "--cluster",   dest="cluster_name",type='string', help="cluster name",      default="cluster1")
    parser.add_option("-t", "--test",      dest="test",        type='int',    help="run which test",    default=0)
    parser.add_option("-r", "--wr_ratio",  dest="wr_ratio",    type='int',    help="w/r time radio",    default=4)
    parser.add_option("-T", "--selftest",  dest="self_test",   action="store_true", default=False)
    # 'options' stays module-global: the test classes read it directly.
    (options, args) = parser.parse_args()

    # Map test number -> test class.  BUG FIX: key 5 appeared twice
    # (dfs_blocksize was silently shadowed by dfs_scalability and thus
    # unreachable); dfs_scalability now gets its own number, 7.
    test_mapping = {
            0: dfs_template,
            1: dfs_simple_writer,
            2: dfs_simple_reader,
            3: dfs_namenode_handler,
            4: dfs_readmode,
            5: dfs_blocksize,
            6: dfs_performance,
            7: dfs_scalability,
        }

    # Instantiate directly from the class object instead of exec()'ing a
    # built string -- same effect, no dynamic code evaluation.
    suite = test_mapping[options.test](options.interface, options.home_dir, options.cluster_name)
    suite.mylogger.info('%s: %s begin.' % (time.ctime(), suite))
    suite.start()
    suite.mylogger.info('%s: %s end.' % (time.ctime(), suite))
