# -*- coding: utf-8 -*-
"""
Created on 2022/2/24

@author: Song
"""
import numpy as np

from DBTestAnalysisLib import *
import streamlit as st
import leancloud

from st_aggrid import *
from streamlit_autorefresh import st_autorefresh
from jenkins import NEED_BUILD, NEED_REPAIR, GOOD

import plotly.express as px
import plotly.graph_objects as go

from vika import Vika

# Vika (vika.cn) spreadsheet API client; the argument is an API token.
vika = Vika("uskaYYRdtqFoPqcVw3KM2Kf")

# Page title reads "Test Summary"; re-run the page every 10 minutes, 400 times max.
st.set_page_config(page_title="测试概要", page_icon="📈", layout="wide")
count = st_autorefresh(interval=600000, limit=400, key="counter")
# NOTE: `datetime` (and `pd` used below) come from the star import of DBTestAnalysisLib.
lastRefreshTime = datetime.now().strftime("%m-%d %H:%M")
st.sidebar.write(f"刷新页面: {count + 1}/400 {lastRefreshTime}")

# Milestones built before these timestamps are considered stale for the listed
# databases (see validBuildT / TestStatus.validMilestone) and must be rebuilt.
latestValidDBCode = {
    'N1': '2022-11-11 16:01:00',
    'N2': '2022-11-11 16:01:00',
}

def hashDefaultGetFunc(item, key):
    """Default HashResult accessor: plain subscript lookup on the record."""
    value = item[key]
    return value


def lcGetFunc(item, key):
    """HashResult accessor for LeanCloud objects, which expose values via .get()."""
    value = item.get(key)
    return value


def g(row, key):
    """Safe lookup: return row[key] when the key is present, else None."""
    if key in row:
        return row[key]
    return None


class HashResult:
    """Index a list of records by the '_'-joined values of several key fields.

    Records sharing the same joined key are grouped into a list, preserving
    their original order, so get(*values) can retrieve them quickly.
    """

    def __init__(self, data, keys, getFunc=hashDefaultGetFunc):
        self._data = data
        self._keys = keys
        self._cache = {}
        for record in data:
            parts = [getFunc(record, k) for k in keys]
            joined = '_'.join(parts)
            self._cache.setdefault(joined, []).append(record)

    def get(self, *args):
        """Return all records whose joined key equals '_'.join(args), or []."""
        return self._cache.get('_'.join(args), [])


def milestones():
    """Build the milestone meta table: one row per (db, dataset, tp_size)."""
    dbs = ['TGC', 'TGK', 'TGKI', 'TGS', 'N1', 'N2', 'PG', 'MA']
    datasets = ['energy', 'traffic', 'syn']
    sizes = ['.1', '.all']
    rows = [
        {'db': db, 'dataset': dataset, 'tp_size': size}
        for db in dbs
        for dataset in datasets
        for size in sizes
    ]
    return pd.DataFrame(rows)


# Test-matrix configuration: workload names, all known systems, datasets.
readWriteTests = ['ehistory', 'snapshot', 'aggmax', 'etpc', 'reachable', 'update', 'append']
dbList = ['TGC', 'TGK', 'TGL', 'TGLI', 'TGKI', 'TGS', 'N1', 'N2', 'PG', 'MA']
datasetList = ['energy', 'traffic', 'syn']

def allTests():
    """Enumerate every (db, dataset, tp_size, test) combination to track.

    'reachable' is not run on the energy dataset; TGL skips 'reachable'
    and 'ehistory'; the index variants TGKI/TGLI run only a subset of tests.
    """
    rows = []
    for db in ['TGC', 'TGK', 'TGL', 'TGS', 'N1', 'N2', 'PG', 'MA']:
        for dataset in datasetList:
            for size in ['.1', '.all']:
                for test in readWriteTests:
                    skip = (test == 'reachable' and (dataset == 'energy' or db == 'TGL')) \
                           or (test == 'ehistory' and db == 'TGL')
                    if not skip:
                        rows.append({'db': db, 'dataset': dataset, 'tp_size': size, 'test': test})

    for db in ['TGKI', 'TGLI']:
        for dataset in datasetList:
            for size in ['.1', '.all']:
                for test in ['aggmax', 'etpc', 'update']:
                    rows.append({'db': db, 'dataset': dataset, 'tp_size': size, 'test': test})

    meta = pd.DataFrame(rows)
    # Categorical keeps datasets in a fixed display/sort order.
    meta['dataset'] = pd.Categorical(meta['dataset'], ["energy", "traffic", "syn"])
    return meta


def lcMilestoneBuild(meta):
    """Attach LeanCloud milestone-build info to each meta row.

    Queries 'TestMilestone' for finished builds (duration present, not a
    deploy record) and adds: last build time ('lc_m_ct'), last duration,
    history count and median historical duration.
    """
    query = leancloud.Query('TestMilestone')
    query.not_equal_to('extra', 'deploy')
    query.exists('duration')
    query.add_descending('createdAt')
    query.limit(999)
    objList = query.find()
    # Newest-first objects grouped by (DB, Dataset, MSize).
    cache = HashResult(objList, ['DB', 'Dataset', 'MSize'], lcGetFunc)

    # st.write(cache._cache)

    def mBuildInfo(row):
        dataset = row['dataset']
        db = row['db']
        tp_size = row['tp_size']
        # TGL/TGLI reuse the TGK/TGKI milestones.
        if db == 'TGL': db = 'TGK'
        if db == 'TGLI': db = 'TGKI'
        buildInfo = cache.get(db.upper(), dataset, 't' + tp_size)
        if len(buildInfo) > 0:
            # validaHistoryBuildT = list(filter(lambda obj: 'duration' in obj and obj['duration'] is not None, buildInfo))
            # # st.write(validaHistoryBuildT)
            historyBuildT = list(map(lambda obj: obj.get('duration'), buildInfo))
            medianBuildTime = np.median(historyBuildT)
            m = buildInfo[0]  # newest build
            return [m.updated_at.strftime("%m-%d %H:%M"), m.get('duration'), len(buildInfo), medianBuildTime]
        else:
            return [None, None, None, None]

    meta[['lc_m_ct',
          'lc_m_dur_t',
          'lc_m_his_cnt',
          'lc_m_his_dur_median']] = meta.apply(mBuildInfo, axis=1, result_type="expand")
    return meta


def vkMilestoneBuild(meta):
    """Attach Vika milestone-build status (latest and previous run) to meta."""
    buildInfo = vika.datasheet("dstSkZb56uR8XsulbF", field_key="name")
    records = list(map(lambda x: x.json(), buildInfo.records.all()))
    cache = HashResult(records, ['dataset', 'db', 'tpSize'])

    def extractor(rowDict):
        db = rowDict['db']
        # TGL/TGLI reuse the TGK/TGKI milestones.
        if db == 'TGL': db = 'TGK'
        if db == 'TGLI': db = 'TGKI'
        builds = cache.get(rowDict['dataset'], db.upper(), 'T' + rowDict['tp_size'])
        if len(builds) > 0:
            item = builds[0]
            # '创建时间' ("creation time") is an epoch-milliseconds timestamp.
            ct = datetime.fromtimestamp(int(item['创建时间'] / 1000)).strftime("%m-%d %H:%M")
            status = item['status'] if 'status' in item else item['phase']
            if len(builds) > 1:
                # Also report the previous build for comparison.
                pre = builds[1]
                pct = datetime.fromtimestamp(int(pre['创建时间'] / 1000)).strftime("%m-%d %H:%M")
                pstatus = pre['status'] if 'status' in pre else pre['phase']
            else:
                pct = None
                pstatus = None
            return [ct, status, pct, pstatus]
        else:
            return [None, None, None, None]

    meta[['vk.m_ct', 'vk.m.status', 'vk.m.pre.ct', 'vk.m.pre.status']] = meta.apply(extractor, axis=1,
                                                                                    result_type="expand")
    return meta


def validBuildT(meta):
    """Attach 'code.m.t': the oldest acceptable milestone build time per db.

    Databases listed in latestValidDBCode require milestones newer than the
    listed timestamp; other databases get None (no cutoff).
    """
    def lastValidBuildTime(row):
        db = row['db']
        if db not in latestValidDBCode:
            return None
        # Trim 'YYYY-MM-DD HH:MM:SS' down to 'MM-DD HH:MM' for comparison.
        return latestValidDBCode[db][5:-3]

    meta[['code.m.t']] = meta.apply(lastValidBuildTime, axis=1, result_type="expand")
    return meta


def vkMilestoneDeploy(meta):
    """Attach the latest Vika deploy record (time + status) to each meta row."""
    deployInfo = vika.datasheet("dst5kWxaj8ciX8kJei", field_key="name")
    records = list(map(lambda x: x.json(), deployInfo.records.all()))
    cache = HashResult(records, ['dataset', 'db', 'tpSize'])

    # st.write('done--------------')

    def extractor(rowDict):
        db = rowDict['db']
        if db.lower() == 'ma':
            # MA has no separate deploy step; treat a finished build as deployed.
            if rowDict['lc_m_ct'] is not None:
                return [rowDict['lc_m_ct'], 'SUCCESS']
            else:
                return [None, None]
        deploys = cache.get(rowDict['dataset'], db.lower(), rowDict['tp_size'][1:])
        if len(deploys) > 0:
            item = deploys[0]
            # '创建时间' ("creation time") is an epoch-milliseconds timestamp.
            ct = datetime.fromtimestamp(int(item['创建时间'] / 1000)).strftime("%m-%d %H:%M")
            status = item['status'] if 'status' in item else item['phase']
            return [ct, status]
        else:
            return [None, None]

    meta[['vk_deploy_ct', 'vk_deploy_status']] = meta.apply(extractor, axis=1, result_type="expand")
    return meta
    # toPrint1 = meta.pivot(index=['dataset', 'tp_size'], columns='system', values='deploy')
    # st.dataframe(toPrint1.style.highlight_null(), use_container_width=True)


def vkRWTest(test, meta):
    """Attach the latest Vika test-run record for `test` to each meta row.

    Rows belonging to other tests keep whatever a previous per-test pass
    wrote, so this can be applied repeatedly over the same frame.
    """
    testInfo = vika.datasheet("dstkUjX9kfeAuSumZj", field_key="name")
    records = testInfo.records.filter(test=test)
    recordList = list(map(lambda x: x.json(), records))
    # Newest first ('创建时间' = creation time, epoch milliseconds).
    recordList.sort(key=lambda i: i['创建时间'], reverse=True)
    cache = HashResult(recordList, ['test', 'dataset', 'db', 'mTpSize'])

    # st.write(cache._cache)

    def extractT(row):
        dataset = row['dataset']
        tp_size = row['tp_size']
        db = row['db']
        if row['test'] == test:
            cache_item = cache.get(test, dataset, db.lower(), tp_size[1:])
            if len(cache_item) > 0:
                t = cache_item[0]
                ctime = datetime.fromtimestamp(int(t['创建时间'] / 1000)).strftime("%m-%d %H:%M")
                status = t['status'] if 'status' in t else t['phase']
                # st.write(test, dataset, db.lower(), tp_size[1:], ctime, status)
                mark = t['mark'] if 'mark' in t else '-'
                return [ctime, status, mark, t['ID']]
            else:
                return [None, None, '-', None]
        else:
            # Not this row's test: keep previously-written values.
            return [g(row, 'vkt_ct'), g(row, 'vkt.status'), g(row, 'vkt.mark'), g(row, 't.jid')]

    meta[['vkt' + '_ct', 'vkt' + '.status', 'vkt.mark', 't.jid']] = meta.apply(extractT, axis=1,
                                                                                    result_type="expand")
    return meta


def lcRWTest(test, meta):
    """Attach the latest LeanCloud test-run info for `test` to each meta row.

    Adds columns: 'lct_ct' (creation time), 'lct.dur_t' (latest duration),
    'lct.his.cnt' (history count), 'lct.his.median' (median duration) and
    'lct.tn' (full test name). Rows for other tests keep their existing
    values so the function can be applied once per test on the same frame.
    """
    query = leancloud.Query('TGraphTest')
    query.contains('TestName', test)
    query.equal_to('maxCon', 1)
    query.equal_to('device', 'data')
    query.add_descending('createdAt')
    query.limit(999)
    rows = query.find()

    def timeReformat(tStr):
        # Reformat '<prefix>.MM.DD HH:MM[...]' into 'MM-DD HH:MM'.
        t0 = tStr.split(' ')
        t1 = t0[0].split('.')
        t2 = t0[1].split(':')
        month = int(t1[1])
        day = int(t1[2])
        hour = int(t2[0])
        minute = int(t2[1])
        return '{:02d}-{:02d} {:02d}:{:02d}'.format(month, day, hour, minute)

    def rowRefactor(obj):
        # Flatten a LeanCloud object into a plain dict keyed for HashResult.
        ti = TestNameInfo(obj.get('TestName'))
        return {
            'test': ti.test,
            'db': ti.db,
            'dataset': ti.dataset,
            'mtpSize': ti.mtpsize,
            'qtpSize': ti.qtpsize,
            'reqcnt': ti.reqcnt,
            'tname': ti.tn,
            'ct': timeReformat(ti.time),
            'duration': obj.get('duration'),
            'jid': obj.get('jenkinsId'),
            'status': obj.get('status')
        }

    results = list(map(rowRefactor, rows))
    cache = HashResult(results, ['test', 'db', 'dataset', 'mtpSize'])

    def testMetaInfo(row):
        dataset = row['dataset']
        db = row['db']
        tp_size = row['tp_size']
        if row['test'] == test:
            testInfo = cache.get(test, db.lower(), dataset, tp_size[1:])
            if len(testInfo) > 0:
                historyTestT = list(map(lambda obj: obj['duration'], testInfo))
                # Some runs have no recorded duration (None); np.median would
                # raise TypeError on them, so take the median over valid
                # values only (the count still reflects all runs).
                validT = [d for d in historyTestT if d is not None]
                medianBuildTime = np.median(validT) if validT else None
                m = testInfo[0]
                return [m['ct'], m['duration'], len(historyTestT), medianBuildTime, m['tname']]
            else:
                return [None, None, None, None, None]
        else:
            # Not this row's test: keep previously-written values.
            return [g(row, 'lct_ct'), g(row, 'lct.dur_t'), g(row, 'lct.his.cnt'), g(row, 'lct.his.median'),
                    g(row, 'lct.tn')]

    meta[['lct' + '_ct',
          'lct' + '.dur_t',
          'lct' + '.his.cnt',
          'lct' + '.his.median',
          'lct' + '.tn'
          ]] = meta.apply(testMetaInfo, axis=1, result_type="expand")
    return meta


def aliRWTest(test, meta):
    """Attach raw-log statistics for each row that has a full test name.

    Reads the request log via fetchLog and computes: success percentage,
    mean execution time, 90th-percentile execution time, and query count.
    Rows without a log (or with the log not ready) get [None, None, None, 0].
    """
    def getRawData(row):
        testFullName = row['lct.tn']
        # isinstance, not `type(...) == str`: missing names come through as
        # NaN (float) from pandas, which must be skipped.
        if isinstance(testFullName, str):
            try:
                raw = fetchLog(testFullName)
                qCnt = len(raw)
                if qCnt > 0:
                    succReq = raw.loc[raw['txSuccess'].astype('bool')]
                    succCnt = len(succReq)
                    avgT = np.mean(succReq['exeTime'])
                    l90 = succReq['exeTime'].quantile(.9)
                    return [int(succCnt * 100 / qCnt), avgT, l90, qCnt]
            except DataNotReadyErr:
                # Log not uploaded yet — report "no data" instead of failing.
                pass
        return [None, None, None, 0]

    meta[['al' + '_succ_ratio',
          'al' + '_avg_t',
          'al' + '_l90_t',
          'al' + '_qcnt',
          ]] = meta.apply(getRawData, axis=1, result_type="expand")
    return meta


# Manually curated test combinations known to time out.
# Format per line: %db$test@dataset[@dataset...].tp_size
# NOTE(review): the literal starts with a newline, so splitlines() makes
# timeoutList[0] an empty string — account for that when consuming it.
timeoutList = '''
%ma$etpc@traffic@syn.all
%ma$reachable@syn.1.all
'''.splitlines()

def timeout(meta):
    # Placeholder: timeout flagging for meta rows is not implemented yet.
    pass

@st.cache
def benchmarks():
    """Return available benchmark descriptors, newest first.

    Each entry of benchmark() is indexed as (name, mtime, size, line_count);
    the name encodes dataset, tp size, test and query count joined by '_'.
    """
    available = benchmark()

    def describe(entry):
        parts = entry[0].split('_')
        return {
            'dataset': parts[1],
            'tp_size': parts[2][1:],
            'test': parts[3],
            'qcnt': int(parts[4]),
            'ctime': datetime.fromtimestamp(entry[1]).strftime("%m-%d %H:%M:%S"),
            'fsize': entry[2],
            'f.l.cnt': entry[3]
        }

    descriptors = [describe(entry) for entry in available]
    descriptors.sort(key=lambda d: d['ctime'], reverse=True)
    return descriptors


def benchmarkStatus(test, benchmarkList, meta):
    """Join benchmark-file availability info onto meta rows.

    Note: the `test` parameter is kept for signature compatibility; the
    lookup uses each row's own 'test' value. Rows without a matching
    benchmark keep previously-written values.
    """
    cache = HashResult(benchmarkList, ['dataset', 'tp_size', 'test'])

    def extractB(row):
        matches = cache.get(row['dataset'], row['tp_size'], row['test'])
        if not matches:
            return [g(row, 'b_qcnt'), g(row, 'b.f.l.cnt'), g(row, 'b_ct'), g(row, 'b_fsize'), g(row, 'b_his')]
        newest = matches[0]
        qcntStr = ','.join(str(x['qcnt']) for x in matches)
        return [newest['qcnt'], newest['f.l.cnt'], newest['ctime'], newest['fsize'], qcntStr]

    meta[['b_qcnt', 'b.f.l.cnt', 'b_ct', 'b_fsize', 'b_his']] = meta.apply(
        extractB, axis=1, result_type="expand")
    return meta


def validate(test, meta):
    """Attach result-validation info (equal ratio, time, job id) for `test`.

    Pulls recent 'validate' Jenkins builds, computes each build's success
    ratio via ResultValidateInfo, and matches builds to meta rows by
    dataset, tp range, test and db membership.
    """
    job = jenkins.job_info('validate')
    lst = job['recent_builds']
    rrr = ResultValidateInfo()

    def getEqualRatio(task):
        # Ratio of queries whose results matched across systems.
        sr = rrr.successRatio(task['jid'])
        task['eq_ratio'] = sr
        return task

    lstWithEqRatio = list(map(getEqualRatio, lst))
    cache = HashResult(lstWithEqRatio, ['dataset', 'tp_range', 'test'])

    # st.write(cache._cache)

    def extractV(row):
        dataset = row['dataset']
        tp_size = row['tp_size']
        cache_item = cache.get(dataset, 'T' + tp_size, test)
        if len(cache_item) > 0:
            # Use the first (most recent) run that covered this row's db.
            for v in cache_item:
                if row['db'].lower() in v['db_list'].split(','):
                    ct = datetime.fromtimestamp(v['createdAt']).strftime("%m-%d %H:%M")
                    return [v['eq_ratio'], ct, v['jid']]
        return [None, None, None]

    meta[['v' + '_eq_ratio', 'v' + '_ct', 'v' + '_jid']] = meta.apply(
        extractV, axis=1, result_type="expand")
    return meta


def vkGitCommitInvalid(meta):
    """Scan Vika git-commit records for [rebuild:...] / [retest:...] directives.

    NOTE(review): this function is unfinished — the loop below computes
    `rebuild`, `retest` and `commitTime` per record but discards them on
    each iteration, `rebuildList` is never populated, and the function
    returns `meta` unchanged right after the loop. Everything after that
    `return meta` is unreachable dead code kept from an earlier draft.
    """
    gitCommit = vika.datasheet("dstznKVFdxeol9b6KT", field_key="name")
    records = gitCommit.records.all()
    # Directives look like "[retest:%db$test@dataset,...]" inside commit comments.
    retestRegexPattern = re.compile(r"\[retest:([^\]]+)\]")
    rebuildRegexPattern = re.compile(r"\[rebuild:([^\]]+)\]")
    itemRP = re.compile(r"[%@$][^%\]@$, ]+")
    rebuildList = {}

    def getKeyList(matches):
        # Split a directive body into its %db / $test / @dataset tokens.
        lst = []
        # print(matches)
        if len(matches) == 1:
            for segment in matches[0].split(','):
                for m in re.findall(itemRP, segment):
                    lst.append(m)
        return lst

    for record in records:
        rowDict = record.json()
        comment = rowDict['comment'].replace(' ', '')
        rebuild = getKeyList(re.findall(rebuildRegexPattern, comment))
        retest = getKeyList(re.findall(retestRegexPattern, comment))
        commitTime = rowDict['commit_t'] / 1000

    # def extractor(rowDict):
    #     dataset = rowDict['dataset']
    #     db = rowDict['db'].lower()
    #     tp_size = rowDict['tp_size'][1:]
    #     if
    #     return datetime.fromtimestamp(int(cache[mName]['创建时间'] / 1000)).strftime(
    #         "%m-%d %H:%M") if mName in cache else None

    # meta['vk_m_deploy_t'] = meta.apply(extractor, axis=1, result_type="expand")
    return meta

    # --- unreachable from here on (see NOTE in the docstring) ---
    def gitCommitInfo(row):
        dataset = row['dataset']
        db = row['db']
        tp_size = row['tp_size']

    lastValidT = latestBuildFor(db.lower(), dataset, 'space')
    # if lastValidT > 0:  # status is known
    #     fm_dt = datetime.fromtimestamp(lastValidT).strftime("%m-%d %H:%M")
    #     if fm_dt < m['time']:  # no rebuild needed
    #         return [fm_dt, ]
    #         # completeTime += buildTime
    #     else:  # definitely needs a rebuild
    #         pass
    #         # rebuildTime += buildTime
    # else:  # status unknown, needs a rebuild
    #     pass
    #     # rebuildTime += buildTime

    # fig = go.Figure(data=[
    #     go.Table(
    #         header=dict(
    #             values=list(meta.columns),
    #             fill_color="gray",
    #             align='left'
    #         ),
    #         cells=dict(values=[
    #             meta['dataset'], meta['tp_size'], meta.system, meta.cell_content
    #         ])
    #     )
    # ])
    #
    # def update_point(trace, points, selector):
    #     print(trace, points, selector)
    #
    # fig.data[0].on_click(update_point)
    #
    # fig.update_layout(margin=dict(l=5, r=5, b=10, t=10), paper_bgcolor='gray')
    # fig.update_traces(customdata=meta['duration'], selector=dict(type='table'))

def actionPriority(meta):
    """Compute a per-row action priority.

    NOTE(review): this function appears unfinished and is not called
    anywhere in this file. `cache` is not defined in this scope (calling it
    would raise NameError), and the inner function returns four values
    while only one column ('priority') is assigned — both need fixing
    before use.
    """
    def priority(row):
        dataset = row['dataset']
        tp_size = row['tp_size']
        test = row['test']
        cache_item = cache.get(dataset, tp_size, test)
        if len(cache_item) > 0:
            b = cache_item[0]
            qcntStr = ','.join(list(map(lambda x: str(x['qcnt']), cache_item)))
            return [b['qcnt'], b['ctime'], b['fsize'], qcntStr]
        else:
            return [g(row, 'b_qcnt'), g(row, 'b_ct'), g(row, 'b_fsize'), g(row, 'b_his')]

    meta[['priority']] = meta.apply(priority, axis=1, result_type="expand")
    return meta



def actionRow(r, action=None, device='data'):
    """Submit one Jenkins job for meta row `r` and return a summary string.

    action: 'build' | 'deploy' | 'test' | 'validate'; defaults to r['action'].
    device: the target Jenkins node.

    NOTE(review): in the 'validate' branch `r['']` looks like an unfinished
    placeholder (it raises KeyError at runtime). The trailing
    `raise NotReadyErr(action, 'unknown action')` passes 2 args to a 4-arg
    constructor and therefore actually raises TypeError — which the
    module-level caller happens to catch; confirm before changing either side.
    """
    # return
    if action is None:
        action = r['action']
    if action == 'build':
        jenkins.build(action, {
            'dataset': r['dataset'],
            'data_size': 'T' + r['tp_size'],
            'db_name': r['db'].upper(),
            'device': device
        })
        return "Build " + r['db'] + '@' + r['dataset'] + r['tp_size'] + ' on ' + device
    elif action == 'deploy':
        # TGL* is the tcypher flavor deployed from the matching TGK* milestone.
        dbType = 'postgresql' if r['db'].lower() == 'pg' else 'neo4j'
        if 'tgl' in r['db'].lower():
            dbType = 'tcypher'
            db = r['db'].lower().replace('tgl', 'tgk')
        else:
            db = r['db'].lower()
        jenkins.build(action, {
            'target_host': device,
            'milestone': 'm_' + r['dataset'] + '_' + db + '_' + r['tp_size'][1:],
            'dbtype': dbType
        })
        return "Deploy " + r['db'] + '@' + r['dataset'] + r['tp_size']
    elif action == 'test':
        if np.isnan(r['b_qcnt']):
            st.write('invalid benchmark', r)
        # Result capture is skipped for the synthetic dataset.
        needResult = 'true' if r['dataset'] != 'syn' else 'false'
        jenkins.build(action, {
            'db_host': 'data',
            'milestone': 'm_' + r['dataset'] + '_' + r['db'].lower() + '_' + r['tp_size'][1:],
            'benchmark_name': 'b_' + r['dataset'] + '_T' + r['tp_size'] + '_' + r['test'] + '_' + str(int(r['b_qcnt'])),
            'max_connection': 1,
            'need_result': needResult,
            'debug': 'false',
            'timeout': 3600,
            'timeout_p': 1800,
        })
        return "Test " + r['test'] + ':' + r['db'] + '@' + r['dataset'] + r['tp_size'] + ' on ' + device + ('(no result)' if needResult == 'false' else '')
    elif action == 'validate':
        jenkins.build(action, {
            'db_host': device,
            'db_list': ','.join(r['db']),
            'dataset': r['dataset'],
            'test': r['test'],
            'tp_range': 'T' + r['tp_size'],
            'req_cnt': r[''],  # NOTE(review): placeholder key — raises KeyError
            'skip_sample': 'true',
            'skip_test': 'true',
            'skip_validate': 'false',
            'max_connection': 1,
            'timeout': 1800,
            'failfast': 'false',
        })
        return "Validate " + ','.join(r['db']) + '@' + r['dataset'] + r['tp_size']
    else:
        raise NotReadyErr(action, 'unknown action')


class NotReadyErr(Exception):
    """Raised when a Jenkins action cannot be carried out for a meta row."""

    def __init__(self, action, row, last_t, msg):
        self.action = action
        # Keep a plain-dict snapshot of the (pandas) row for later inspection.
        self.row = row.to_dict()
        self.last_t = last_t
        self.message = msg

    def __str__(self):
        # The exception renders as its human-readable message.
        return self.message


class TestStatus:
    """Derive validity flags (milestone/deploy/test) for one meta row."""

    def __init__(self, row):
        # Work on a plain-dict copy of the pandas row.
        self.r = row.to_dict()

    def validMilestone(self):
        """Milestone exists and is newer than the last invalidating code change."""
        built = self.r['lc_m_ct']
        cutoff = self.r['code.m.t']
        if built is None:
            return False
        return cutoff is None or built > cutoff

    def validDeploy(self):
        """Deploy succeeded and happened no earlier than the valid milestone."""
        r = self.r
        if not self.validMilestone():
            return False
        if r['vk_deploy_ct'] is None:
            return False
        return r['vk_deploy_status'] == 'SUCCESS' and r['vk_deploy_ct'] >= r['lc_m_ct']

    def validTest(self):
        """A test result exists with a positive success ratio."""
        ratio = self.r['al_succ_ratio']
        if ratio is None or np.isnan(ratio):
            return False
        return ratio > 0

    def needRepair(self):
        # Not implemented yet.
        pass



def actionToTake(meta):
    """Decide the next pipeline action for every meta row.

    Adds integer validity flags ('v.m', 'v.d', 'v.t') and an 'action'
    column: one of 'skip', 'test', 'deploy', 'build' or 'await(dep)'.
    """
    def calcAction(vm, vd, vt):
        # Valid test -> done; otherwise advance the earliest incomplete
        # stage: build -> deploy -> test.
        if vt:
            return 'skip'
        elif vm:
            if vd:
                return 'test'
            else:
                return 'deploy'
        else:
            return 'build'

    def bool2int(b):
        return 1 if b else 0

    def calcStatus(row):
        ts = TestStatus(row)
        vm = ts.validMilestone()
        vd = ts.validDeploy()
        vt = ts.validTest()
        action = calcAction(vm, vd, vt)
        db = row['db'].lower()
        # TGL variants are deployed from TGK builds, so they wait instead of building.
        if 'tgl' in db and action == 'build':
            action = 'await(dep)'
        # PG and MA are not deployed through the milestone pipeline.
        if db in ['pg', 'ma'] and action == 'deploy':
            action = 'test'
        # Force a redeploy/rebuild for anything deployed after the 01-11 cutoff.
        # Guard against None: vkMilestoneDeploy may leave vk_deploy_ct empty,
        # and comparing None with str raises TypeError.
        if row['vk_deploy_ct'] is not None and row['vk_deploy_ct'] > '01-11 00:00':
            action = 'deploy' if db != 'ma' else 'build'
        return bool2int(vm), bool2int(vd), bool2int(vt), action

    meta[['v.m', 'v.d', 'v.t', 'action']] = meta.apply(calcStatus, axis=1, result_type="expand")
    return meta


def recentJobInfo(lst, cnt=5):
    """Summarize up to `cnt` currently-running builds, newest first.

    Each result is {'paramStr': human-readable summary, 'raw': job dict};
    the summary format depends on which job parameters are present.
    """
    def toStr(jj):
        if 'target_host' in jj:
            # deploy-style job
            return jj['milestone'] + '@' + jj['target_host']
        elif 'simple_mode' in jj:
            # validate-style job
            return jj['test'] + ': ' + jj['db_list'] + '@' + jj['dataset'] + jj['tp_range'] + jj['req_cnt']
        elif 'benchmark_name' in jj:
            # test-style job
            return jj['benchmark_name'] + '@' + jj['milestone'] + '(' + jj['max_connection'] + ')'
        else:
            # milestone-build job
            return jj['db_name'] + '@' + jj['dataset'] + jj['data_size'][1:] + ' on ' + jj['device']

    running = [j for j in lst if j['building']]
    running.sort(key=lambda x: x['createdAt'], reverse=True)
    return [{'paramStr': toStr(j), 'raw': j} for j in running[:cnt]]


st.sidebar.markdown('# 最近状态')  # sidebar heading: "Recent status"

# st.markdown('''
# (disabled intro) This page shows the status and progress of every item in
# the read/write tests. Each item is uniquely identified by db, dataset,
# tp_size and test. Every test needs to:
# 1. Build a milestone (a base database preloaded with data)
# 2. Deploy the milestone from step 1 to a test machine
# 3. Generate a benchmark (the request list)
# 4. Run the test (start the database from 2, send the queries from 3)
# 5. Validate (compare results of identical requests across systems)
# ''')
# Known Jenkins nodes and their last observed state (offline/idle/busy).
nodeStatus = {
    'data': 'offline',
    'ssworkstation': 'offline',
    'zhworkstation': 'offline',
}
with st.spinner('Wait for it...'):
    pp = st.sidebar.progress(0)
    # List currently-running Jenkins builds per job type in the sidebar.
    allRecentJobs = recentJenkinsJob()
    for task in allRecentJobs:
        running = recentJobInfo(allRecentJobs[task]['recent_builds'])
        if len(running) > 0:
            st.sidebar.markdown('## ' + task)
        for job in running:
            st.sidebar.write(job['paramStr'])

    # Show each Jenkins node as busy (with its jobs), idle, or offline.
    nodes = jenkinsNodeRunningBuilds()
    allBusy = True
    for node in nodes:
        jNode = nodes[node][0]
        if jNode['offline']:
            nodeStatus[node] = 'offline'
        elif jNode['idle']:
            nodeStatus[node] = 'idle'
            allBusy = False
        else:
            nodeStatus[node] = 'busy'
        nodeName = node.replace('workstation', '').upper()
        if nodeStatus[node] == 'busy':
            # Shorten the long Jenkins job names for display.
            for job in nodes[node][1]:
                jName = job['fullDisplayName'].replace('milestone-build-master', 'build').replace('milestone-deploy', 'deploy')
                st.sidebar.info(f"{nodeName}: {jName}")
        elif nodeStatus[node] == 'idle':
            st.sidebar.success(f"{nodeName}: Idle")
        else:
            st.sidebar.error(f"{nodeName}: Offline")

    # Build the meta table and enrich it stage by stage, moving the progress bar.
    # NOTE(review): the progress values are not monotonic (12 -> 30+ -> 15 ...);
    # cosmetic only.
    meta = allTests()  # milestones()
    pp.progress(5)
    meta = lcMilestoneBuild(meta)
    pp.progress(8)
    meta = validBuildT(meta)
    pp.progress(10)

    bLst = benchmarks()
    pppp = 30
    pp.progress(12)
    for test in readWriteTests:
        meta = benchmarkStatus(test, bLst, meta)
        pp.progress(pppp)
        pppp += 2

    if allBusy:
        st.sidebar.warning("Busy running.")

    meta = vkMilestoneBuild(meta)
    pp.progress(15)
    meta = vkMilestoneDeploy(meta)
    pp.progress(25)

    # Per-test enrichment: Vika run status, LeanCloud history, raw-log stats.
    for test in readWriteTests:
        meta = vkRWTest(test, meta)
        meta = lcRWTest(test, meta)
        meta = aliRWTest(test, meta)
        pp.progress(pppp)
        pppp += 2

    # pp.progress(60)
    # meta = validate('ehistory', meta)
    # meta = benchmarkStatus('snapshot', bLst, meta)
    pp.progress(75)
    meta = actionToTake(meta)
    # meta = benchmarkStatus('aggmax', bLst, meta)
    # pp.progress(80)
    # meta = benchmarkStatus('etpc', bLst, meta)
    # pp.progress(85)
    # meta = benchmarkStatus('reachable', bLst, meta)
    # pp.progress(90)
    # meta = benchmarkStatus('append', bLst, meta)
    # pp.progress(95)
    # meta = benchmarkStatus('update', bLst, meta)
    pp.progress(100)
    # Build AgGrid options: groupable/pivotable columns, checkbox selection.
    gd = GridOptionsBuilder.from_dataframe(meta)
    gd.configure_default_column(groupable=True, sorteable=True, enableRowGroup=True, enablePivot=True, enableValue=True,
                                allowedAggFuncs=['sum', 'avg', 'count', 'min', 'max', 'first', 'last'])
    # selectedRowIndexes = []
    gd.configure_selection(selection_mode='multiple', use_checkbox=True)  # , pre_selected_rows=[]
    # Color ratio cells: green when 100, orange otherwise, red when missing.
    gd.configure_columns(['al_succ_ratio', 'v_eq_ratio'], cellStyle=JsCode('''
        function(param){
          //console.log(param.value, param);
          var d = param.data;
          var v = param.value;
          if(v){
              if(v==100){
                return {'backgroundColor': 'lightgreen'}
              }else{
                return {'backgroundColor': 'orange'}
              }
          }else{
            return {'backgroundColor': 'red'}
          }
        };'''),
                         # pivot=True
                         )
    # Color deploy/test timestamp cells by their companion status column:
    # green SUCCESS, blue STARTED, orange otherwise, red when missing.
    gd.configure_columns(['vk_deploy_ct', 'vkt_ct'], cellStyle=JsCode('''
            function(param){
              //console.log(param.value, param);
              var nameStatusMap = {'vk_deploy_ct': 'vk_deploy_status', 'vkt_ct': 'vkt.status'};
              var col = param.colDef.field;
              var d = param.data;//console.log('d.status', d.vk_deploy_status);
              var v = param.value;
              if(v){
                  var vv = d[nameStatusMap[col]];
                  if(vv=='SUCCESS'){
                    return {'backgroundColor': 'lightgreen'}
                  }else if(vv=='STARTED'){
                    return {'backgroundColor': 'lightblue'}
                  }else{
                    return {'backgroundColor': 'orange'}
                  }
              }else{
                return {'backgroundColor': 'red'}
              }
            };''')
                         )
    # Color the LeanCloud test-time cell: green when not older than the
    # matching Vika run time, orange when stale, red when missing.
    gd.configure_columns(['lct_ct'], cellStyle=JsCode('''
                function(param){
                  //console.log(param.value, param);
                  var timePreMap = {'lct_ct': 'vkt_ct'};
                  var col = param.colDef.field;
                  var d = param.data;//console.log('d.status', d.vk_deploy_status);
                  var v = param.value;
                  if(v){
                      if(d[timePreMap[col]]<=v){
                        return {'backgroundColor': 'lightgreen'}
                      }else{
                        return {'backgroundColor': 'orange'}
                      }
                  }else{
                    return {'backgroundColor': 'red'}
                  }
                };''')
                         )
    # Initially hide all but the identity, action, timestamp and ratio columns.
    columnsInitHide = filter(
        lambda c: not (c in ['dataset', 'db', 'tp_size', 'action'] or c.endswith('_ct') or 'ratio' in c or 'l90' in c),
        list(meta.columns))
    gd.configure_columns(list(columnsInitHide), hide=True)
    # Duration columns aggregate as sums when grouped.
    gd.configure_columns(list(filter(lambda c: 'dur' in c, list(meta.columns))), aggFunc='sum')
    gd.configure_grid_options(
        sideBar=True,
        statusBar={
            'statusPanels': [{
                'statusPanel': 'agAggregationComponent',
                'statusPanelParams': {
                    'aggFuncs': ['avg', 'sum']
                }
            }]
        },
        # pivotMode=True,
        enableCharts=True,
        pivotPanelShow='onlyWhenPivoting',
        getRowId=JsCode(
            '''function(params){var d=params.data;return d.db+'_'+d.dataset+'_'+d.tp_size+'_'+d.test;}''').js_code,
        enableRangeSelection=True,
        rowGroupPanelShow='always',
        onRangeSelectionChanged=JsCode('''
function(event) {
  var api = event.api;
  var rowModel = api.getModel();
  var cellRanges = api.getCellRanges();
  //console.log(cellRanges);
  if (cellRanges) {
    cellRanges.forEach(function (range) {
      /** get starting and ending row, remember rowEnd could be before rowStart **/
      var startRow = Math.min(range.startRow.rowIndex, range.endRow.rowIndex);
      var endRow = Math.max(range.startRow.rowIndex, range.endRow.rowIndex);
      for (var rowIndex = startRow; rowIndex <= endRow; rowIndex++) {
        range.columns.forEach(function (column) {
          var rowNode = rowModel.getRow(rowIndex);
          //console.log(column.colId, rowNode.data);
          var value = api.getValue(column, rowNode);
          if (typeof value === 'number') {
          //  sum += value;
          }
        });
      }
    });
  }
}
        ''').js_code,
        firstDataRendered=JsCode('''
function(event){{
  var api = event.api;
  var columnApi = event.columnApi;
  api.autoSizeColumns();
  columnApi.autoSizeColumns({}, true)
}};
        '''.format(list(meta.columns))).js_code
    )
    gd.configure_pagination(paginationAutoPageSize=False, paginationPageSize=20)
    # Expired trial license key, kept for reference; passing it is commented out below.
    license_key = "For_Trialing_ag-Grid_Only-Not_For_Real_Development_Or_Production_Projects-Valid_Until-18_March_2021_[v2]_MTYxNjAyNTYwMDAwMA==948d8f51e73a17b9d78e03e12b9bf934"
    gOpts = gd.build()
    # st.write(gOpts)
    table = AgGrid(meta,
                   gridOptions=gOpts,
                   allow_unsafe_jscode=True,
                   width='100%',
                   reload_data=False,
                   fit_columns_on_grid_load=True,
                   # license_key=license_key,
                   enable_enterprise_modules=True,
                   data_return_mode=DataReturnMode.FILTERED_AND_SORTED,
                   update_mode=GridUpdateMode.MANUAL)


    def update_point(*args):
        # Button callback; the call site supplies args = (selected_row, action, device).
        # NOTE(review): actionRow's signature is (r, action=None, device=...),
        # but this passes (action, row, device) — the first two arguments look
        # swapped; confirm against actionRow before relying on these buttons.
        r = args[0]
        job = args[1]
        device = args[2]
        msg = actionRow(job, r, device)
        st.sidebar.info(msg)


    device = st.sidebar.radio('Machine', ('data', 'ss', 'zh'), horizontal=True)
    action = st.sidebar.radio('Action', ('build', 'deploy', 'test', 'validate'), horizontal=True)
    # One sidebar button per selected grid row to submit the chosen action.
    if len(table['selected_rows']) > 0:
        st.table(table['selected_rows'])
        for slr in table['selected_rows']:
            st.sidebar.button(action + " " + slr['db'] + '@' + slr['dataset'] + slr['tp_size'] + ' on ' + device,
                              help="Submit a Jenkins job execution",
                              on_click=update_point,
                              args=(
                                  slr, action,
                                  device))  # kwargs=slr, args=(slr['system'], slr['dataset'], slr['tp_size'])
    # if btn:
    #     st.info('btn clicked')
# st.success('Done!')
# st.stop()
def idleClient():
    """Return an idle client workstation (preferring zh), or 'none'."""
    for node in ('zhworkstation', 'ssworkstation'):
        if nodeStatus[node] == 'idle':
            return node
    return 'none'

# NOTE: st.stop() ends every script run here, so the auto-scheduling logic
# below never executes; it is kept as disabled work-in-progress.
st.stop()

if nodeStatus['data'] == 'idle':
    if idleClient() == 'none':
        # Only the data node is free: queue pending deploys for the main systems.
        shouldBuildRows = meta[
            meta['action'].isin(['deploy']) &  # 'build',
            meta['db'].isin(['TGK', 'TGS', 'TGC', 'PG', 'TGL', 'TGLI', 'N1', 'N2'])  #
            # (meta['dataset'] == 'energy') &
        ]  #
    else:
        # A client is also free: allow deploys and unmarked, benchmark-ready tests.
        shouldBuildRows = meta[
            meta['action'].isin(['deploy', 'test']) &  # 'build',
            # meta['db'].isin(['MA', 'TGKI']) &  #'TGK', 'TGS', 'TGC', 'PG', 'TGL', 'TGLI', 'N1', 'N2'
            (meta['vkt.mark'] == '-') &
            (meta['b.f.l.cnt'] >= 10) &
            # (meta['lct_ct'].isnull()) &
            (~meta['test'].isin(['ehistory', 'snapshot', 'aggmax', 'etpc', 'reachable']))
        ]  # , 'append'
    shouldBuildRows = shouldBuildRows.sort_values(['action', 'vkt_ct'], ignore_index=True)
    nodeToRun = 'data'
elif idleClient() != 'none':
    shouldBuildRows = []
    # meta[
    #     meta['action'].isin(['build']) & (~meta['db'].isin(['PG', 'MA']))
    #     # (meta['dataset'] == 'energy') &
    #     # (meta['test'] != 'etpc')
    # ]  #
    # shouldBuildRows = shouldBuildRows.sort_values(['dataset', 'lc_m_ct'], ignore_index=True)
    nodeToRun = idleClient()
else:
    shouldBuildRows = []
    nodeToRun = 'none'

if len(shouldBuildRows) > 0:
    # st.dataframe(sortedList)
    # sortedList = shouldBuildRows
    sortedList = shouldBuildRows.sample(frac=1)  # shuffled so a failing build is not retried over and over
    r = sortedList.head(1).to_dict('records')[0]
    action = r['action']
    try:
        st.sidebar.info(actionRow(r, action=action, device=nodeToRun))
    except TypeError as e:
        # actionRow can raise TypeError (unknown action via the NotReadyErr
        # arity mismatch, or NaN handling); treat it as "Jenkins not ready".
        st.sidebar.warning('jenkins err, try later~')
else:
    st.sidebar.warning('nothing to do~')

# st.markdown('''
# <div id=""></div>
# ''', unsafe_allow_html=True)


# st.markdown('## STEP1：构建')
# meta = milestoneStatus()
# import time

# time.sleep(2)
# st.markdown('## STEP2：部署')
# deployStatus(meta)

# st.markdown('## STEP3：生成（benchmark）& 测试（Test）')
# benchmarkStatus('ehistory')
# benchmarkStatus('snapshot')
# benchmarkStatus('aggmax')
# benchmarkStatus('etpc')
# benchmarkStatus('reachable')
# benchmarkStatus('append')
# benchmarkStatus('update')

# st.markdown('## STEP4：校验（validate）')
#
# st.markdown('## STEP5：')
# st.dataframe(meta, use_container_width=True)
