# -*- coding: utf-8 -*-
"""
Created on 2022/9/29

@author: Song
"""
import numpy as np
import pandas as pd
import math
from DBTestAnalysisLib import *
import plotnine as G
import seaborn as sns
import matplotlib.pyplot as plt
import altair as alt
import plotly.express as px
import streamlit as st
from st_aggrid import *
from vika import Vika

# Vika (datasheet service) API client.
# NOTE(review): hard-coded API token — consider moving it to an environment
# variable or Streamlit secrets.
vika = Vika("uskaYYRdtqFoPqcVw3KM2Kf")

st.set_page_config(page_title="读写测试", page_icon="📈", layout="wide")
#      <!-- From SVG stripe generator https://www.coffee-break-designs.com/labs/svg_stripe_generator/ -->
# Inject zero-size SVG <pattern> definitions (diagonal / cross hatching) into
# the page so charts can reference them as url(#...) fills.
st.write(
    '''<svg height="0" width="0" xmlns="http://www.w3.org/2000/svg" version="1.1"><defs><pattern id="diagonal_up_left" patternUnits="userSpaceOnUse" width="9.5" height="9.5" patternTransform="rotate(-45)"><line x1="0" y="0" x2="0" y2="9.5" stroke="#000000" stroke-width="4" /></pattern><pattern id="diagonal_up_right" patternUnits="userSpaceOnUse" width="9.5" height="9.5" patternTransform="rotate(45)"><line x1="0" y="0" x2="0" y2="9.5" stroke="#000000" stroke-width="4" /></pattern><pattern id="cross_hatch" patternUnits="userSpaceOnUse" width="9.5" height="9.5" patternTransform="rotate(45)"><line x1="0" y="0" x2="9.5" y2="0" stroke="#000000" stroke-width="4" /><line x1="0" y="0" x2="0" y2="9.5" stroke="#000000" stroke-width="4" /></pattern></defs></svg>''',
    unsafe_allow_html=True)


def hashDefaultGetFunc(item, key):
    """Default accessor for HashResult: plain subscript lookup on the record."""
    value = item[key]
    return value


def lcGetFunc(item, key):
    """Accessor for records exposing a .get(key) method (e.g. leancloud objects)."""
    getter = item.get
    return getter(key)


def g(row, key):
    """Null-safe column read: row[key] when present, otherwise None."""
    if key in row:
        return row[key]
    return None


class HashResult:
    """Index a list of records by a composite key for O(1) repeated lookups.

    The composite key is the '_'-joined values of *keys*, extracted from each
    record via *getFunc*.  Records sharing the same key are grouped in their
    original order, so index 0 of a group is the first record seen.
    """

    def __init__(self, data, keys, getFunc=hashDefaultGetFunc):
        self._data = data
        self._keys = keys
        self._cache = {}
        for record in data:
            composite = '_'.join(getFunc(record, k) for k in keys)
            self._cache.setdefault(composite, []).append(record)

    def get(self, *args):
        """Return the records indexed under '_'.join(args), or [] when absent."""
        return self._cache.get('_'.join(args), [])


# Dimensions of the test matrix: workload names, database systems and datasets.
readWriteTests = ['ehistory', 'snapshot', 'aggmax', 'etpc', 'reachable', 'update', 'append']  #
dbList = ['TGC', 'TGK', 'TGL', 'TGLI', 'TGKI', 'TGS', 'N1', 'N2', 'PG', 'MA']
datasetList = ['energy', 'traffic', 'syn']


def allTests():
    """Build the full test matrix as a DataFrame.

    One row per (db, dataset, tp_size, test) combination, minus combinations
    that are known not to run:
      * 'reachable' is skipped on the 'energy' dataset and on TGL;
      * 'ehistory' is skipped on TGL.
    The TGKI/TGLI index variants only run a reduced test set.
    """
    metaList = []
    for db in ['TGC', 'TGK', 'TGL', 'TGS', 'N1', 'N2', 'PG', 'MA']:
        for dataset in datasetList:
            for mSize in ['.1', '.all']:  # '.01', '.5', '.9',
                for test in readWriteTests:
                    # guard clauses instead of continue/redundant-else
                    if test == 'reachable' and (dataset == 'energy' or db == 'TGL'):
                        continue
                    if test == 'ehistory' and db == 'TGL':
                        continue
                    metaList.append({'db': db, 'dataset': dataset, 'tp_size': mSize, 'test': test})

    for db in ['TGKI', 'TGLI']:
        for dataset in datasetList:
            for mSize in ['.1', '.all']:  # '.01', '.5', '.9',
                for test in ['aggmax', 'etpc', 'update']:
                    metaList.append({'db': db, 'dataset': dataset, 'tp_size': mSize, 'test': test})

    meta = pd.DataFrame(metaList)
    # fixed categorical order so pivots/facets always show datasets consistently
    meta['dataset'] = pd.Categorical(meta['dataset'], ["energy", "traffic", "syn"])
    return meta


def lcRWTest(test, meta):
    """Attach the latest leancloud build info for *test* to the meta frame.

    Queries the 'TGraphTest' table (single-connection runs on device 'data'),
    indexes the results by (test, db, dataset, mtpSize), and fills the lct_*
    columns with the newest run plus history stats (count / median duration).
    Rows whose 'test' differs from *test* keep their previously-filled values.
    """
    query = leancloud.Query('TGraphTest')
    query.contains('TestName', test)
    query.equal_to('maxCon', 1)
    query.equal_to('device', 'data')
    query.add_descending('createdAt')
    query.limit(999)
    rows = query.find()

    def timeReformat(tStr):
        # 'YYYY.MM.DD HH:MM...' -> 'MM-DD HH:MM'
        dateBits = tStr.split(' ')[0].split('.')
        clockBits = tStr.split(' ')[1].split(':')
        return '{:02d}-{:02d} {:02d}:{:02d}'.format(
            int(dateBits[1]), int(dateBits[2]), int(clockBits[0]), int(clockBits[1]))

    def rowRefactor(obj):
        info = TestNameInfo(obj.get('TestName'))
        return {
            'test': info.test,
            'db': info.db,
            'dataset': info.dataset,
            'mtpSize': info.mtpsize,
            'qtpSize': info.qtpsize,
            'reqcnt': info.reqcnt,
            'tname': info.tn,
            'ct': timeReformat(info.time),
            'duration': obj.get('duration'),
            'jid': obj.get('jenkinsId'),
            'status': obj.get('status'),
        }

    cache = HashResult([rowRefactor(r) for r in rows],
                       ['test', 'db', 'dataset', 'mtpSize'])

    def testMetaInfo(row):
        if row['test'] != test:
            # keep whatever a previous lcRWTest pass already wrote
            return [g(row, 'lct_ct'), g(row, 'lct.dur_t'), g(row, 'lct.his.cnt'),
                    g(row, 'lct.his.median'), g(row, 'lct.tn')]
        hits = cache.get(test, row['db'].lower(), row['dataset'], row['tp_size'][1:])
        if not hits:
            return [None, None, None, None, None]
        durations = [h['duration'] for h in hits]
        newest = hits[0]  # query was sorted newest-first
        return [newest['ct'], newest['duration'], len(durations),
                np.median(durations), newest['tname']]

    meta[['lct_ct',
          'lct.dur_t',
          'lct.his.cnt',
          'lct.his.median',
          'lct.tn'
          ]] = meta.apply(testMetaInfo, axis=1, result_type="expand")
    return meta


def vkRWTest(test, meta):
    """Attach the latest vika datasheet run status for *test* to meta.

    Fetches all records for *test* (newest first), indexes them by
    (test, dataset, db, mTpSize) and fills vkt_ct / vkt.status / mark / t.jid.
    Rows for other tests keep their previously-filled values.
    """
    sheet = vika.datasheet("dstkUjX9kfeAuSumZj", field_key="name")
    recordList = [rec.json() for rec in sheet.records.filter(test=test)]
    recordList.sort(key=lambda rec: rec['创建时间'], reverse=True)
    cache = HashResult(recordList, ['test', 'dataset', 'db', 'mTpSize'])

    def extractT(row):
        if row['test'] != test:
            # keep whatever a previous vkRWTest pass already wrote
            return [g(row, 'vkt_ct'), g(row, 'vkt.status'), g(row, 'mark'), g(row, 't.jid')]
        hits = cache.get(test, row['dataset'], row['db'].lower(), row['tp_size'][1:])
        if not hits:
            return [None, None, '-', None]
        newest = hits[0]
        # '创建时间' is a millisecond epoch timestamp
        ctime = datetime.fromtimestamp(int(newest['创建时间'] / 1000)).strftime("%m-%d %H:%M")
        status = newest['status'] if 'status' in newest else newest['phase']
        mark = newest['mark'] if 'mark' in newest else '-'
        return [ctime, status, mark, newest['ID']]

    meta[['vkt_ct', 'vkt.status', 'mark', 't.jid']] = meta.apply(extractT, axis=1, result_type="expand")
    return meta


def aliRWTest(test, meta):
    """Fill the al_* columns with raw per-request timings from the log store.

    For every row whose 'lct.tn' holds a test-run name, fetch the raw request
    log, keep the successful transactions and derive: success count, mean and
    90th-percentile execution time, total request count, the raw
    (request-id, exe-time) pairs and a compact "mean±std(cnt)" label.
    Rows with no run name or with logs not yet uploaded get empty defaults.
    """
    def getRawData(row):
        testFullName = row['lct.tn']
        # NaN/None when no run is known -- isinstance instead of type()==str
        if isinstance(testFullName, str):
            try:
                raw = fetchLog(testFullName)
                qCnt = len(raw)
                if qCnt > 0:
                    raw = extractParam2Col(raw, ['id'])
                    succReq = raw.loc[raw['txSuccess'].astype('bool')]
                    succCnt = len(succReq)
                    avgT = np.mean(succReq['exeTime'])
                    l90 = succReq['exeTime'].quantile(.9)
                    std = np.std(succReq['exeTime'])
                    return [succCnt, avgT, l90, qCnt,
                            list(zip(list(succReq['id']), list(succReq['exeTime']))),
                            "{:.0f}±{:.0f}({:.0f})".format(avgT, std, succCnt)]
            except DataNotReadyErr:
                # log not uploaded yet -- deliberately fall through to defaults
                pass
        return [None, None, None, 0, [], None]

    meta[['al_succ_cnt',
          'al_avg_t',
          'al_l90_t',
          'al_qcnt',
          'al.raw',
          'al.content'
          ]] = meta.apply(getRawData, axis=1, result_type="expand")
    return meta


# Sidebar switch for what the summary tables should display.
# NOTE(review): 'option' is not read anywhere in this visible chunk -- confirm
# whether it is vestigial or consumed elsewhere.
option = st.sidebar.selectbox(
    '表格内容',
    ('l90(qCnt)', 'mean(qCnt)'))


def groupByTest(test, allTestMeta):
    """Render the per-test view.

    Shows a db × (dataset, tp_size) pivot table of the "mean±std(cnt)" summary
    strings, then a faceted altair bar chart of the 90th-percentile latency
    per db and dataset.
    """
    meta = allTestMeta[allTestMeta['test'] == test]
    toPrint1 = meta.pivot(index='db', columns=['dataset', 'tp_size'], values='al.content')
    # bare expression: rendered as a caption via Streamlit "magic"
    test.upper() + '[RW]: Available test results on device(data)'
    # NOTE(review): `null_color` is deprecated in newer pandas (use `color`)
    toPrint1 = toPrint1.style \
        .highlight_null(null_color='red')
    st.dataframe(toPrint1, use_container_width=True)

    # one row per (db, request) timing pair
    data = meta.explode('al.raw').reset_index()

    idCntMap = {}  # request id -> number of rows (dbs) reporting a timing for it
    dbMap = {}     # dict used as an insertion-ordered set of the dbs seen

    def extractRaw(row):
        dbMap[row['db']] = True
        idAndTime = row['al.raw']  # (request id, execution time) pair
        reqId = idAndTime[0]       # renamed: previously shadowed builtin `id`
        idCntMap[reqId] = idCntMap.get(reqId, 0) + 1
        return idAndTime

    data = data.dropna(subset=['al.raw'])
    data[['req.id', 'exe.time']] = data.apply(extractRaw, axis=1, result_type="expand")

    def addFreq(row):
        reqId = row['req.id']
        return int(reqId), idCntMap[reqId]

    data[['req.id.int', 'id.freq']] = data.apply(addFreq, axis=1, result_type="expand")

    # keep only requests measured by every db so distributions are comparable
    # (currently unused below -- the per-request plots are disabled)
    data = data[data['id.freq'] >= len(dbMap)]

    bar = alt.Chart(meta).mark_bar().encode(
        y=alt.Y("al_l90_t",
                axis=alt.Axis(format="~s"),
                title=None,
                scale=alt.Scale(type="symlog")),  # symlog copes with the large latency spread
        x=alt.X("db", title=None),
        color=alt.Color("db"),
        tooltip=["db", "lct_ct", "al_l90_t", "al_succ_cnt", "al_qcnt"]
    ).properties(
        width=160,
        height=100
    )
    text = bar.mark_text(
        baseline='bottom'
    ).encode(
        text=alt.Text('al_l90_t', format='.2s')
    )
    c = alt.layer(bar, text).facet(
        facet='dataset',
        columns=3
    ).resolve_scale(
        y='independent'
    )

    st.altair_chart(c)


def groupByDataset(dataset, allTestMeta):
    """Render the per-dataset view: db × test status table plus latency bars."""
    subset = allTestMeta[allTestMeta['dataset'] == dataset]
    st.dataframe(subset.pivot(index='db', columns=['test'], values='display_content'))

    yAxis = alt.Y("al_l90_t",
                  axis=alt.Axis(format="~s"),
                  title=None,
                  scale=alt.Scale(type="symlog"))
    bars = alt.Chart(subset).mark_bar().encode(
        y=yAxis,
        x=alt.X("db", title=None),
        color=alt.Color("db"),
        tooltip=["db", "lct_ct", "al_l90_t", "al_succ_cnt", "al_qcnt"]
    ).properties(width=160, height=180)
    labels = bars.mark_text(baseline='bottom').encode(
        text=alt.Text('al_l90_t', format='.2s'))
    chart = alt.layer(bars, labels).facet(
        facet='test',
        columns=3
    ).resolve_scale(y='independent')
    st.altair_chart(chart)


def groupByDB(db, allTestMeta):
    """Render one db's latency overview: l90 bars per test, faceted by dataset."""
    subset = allTestMeta[(allTestMeta['db'] == db)]
    bars = alt.Chart(subset).mark_bar().encode(
        y=alt.Y("al_l90_t",
                axis=alt.Axis(format="~s"),
                title=None,
                scale=alt.Scale(type="symlog")),
        x=alt.X("test", title=None),
        tooltip=["db", "lct_ct", "al_l90_t", "al_succ_cnt", "al_qcnt"]
    ).properties(width=160, height=100)
    labels = bars.mark_text(baseline='bottom').encode(
        text=alt.Text('al_l90_t', format='.2s'))
    chart = alt.layer(bars, labels).facet(
        facet='dataset',
        columns=3
    ).resolve_scale(y='independent')
    st.altair_chart(chart)


def timeResult(test, data):
    """Plot per-db execution-time distributions with plotnine.

    Layers: a bar of the mean, a boxplot of the spread, and a text label with
    the count of non-zero observations, faceted by dataset × tp_size.
    """
    dodge = G.position_dodge(width=.9)
    plot = G.ggplot(data, G.aes(x='db', y='al.raw', fill='db'))
    plot += G.stat_summary(fun_y=np.mean, geom='bar', position=dodge, width=.9)
    plot += G.geom_boxplot(width=.3, alpha=0.7, outlier_size=1, outlier_shape='.',
                           outlier_colour="steelblue", position=dodge)
    # annotate each bar with the number of non-zero observations
    plot += G.geom_text(G.aes(label=G.after_stat('y')),
                        stat=G.stat_summary(fun_y=np.count_nonzero),
                        position=dodge, format_string='{:.1f}', va='bottom', nudge_y=40)
    plot += G.facet_grid('dataset ~ tp_size', scales="free")
    plot += G.ylab('Request Execution Time (ms)')
    plot += G.theme(figure_size=(20, 8))
    plot += G.ggtitle(test)
    st.pyplot(plot.draw())


# Load every read/write test's metadata from leancloud, vika and the raw log
# store, driving a temporary progress bar while fetching.
empty = st.empty()
my_progress = empty.progress(0)
meta = allTests()
pppp = 5  # coarse progress percentage, bumped twice per test
for test in readWriteTests:
    meta = lcRWTest(test, meta)
    meta = vkRWTest(test, meta)
    my_progress.progress(pppp)
    pppp += 5
    meta = aliRWTest(test, meta)
    my_progress.progress(pppp)
    pppp += 5
empty.empty()  # remove the progress bar once loading is done
# st.dataframe(meta)
# col1, col2 = st.columns(2)
# Tallies filled in by addTimeoutMsg: named counters plus one entry per
# 3-char status-flag combination.
cnt = {
    'needRepair': 0,
    'needTest': 0,
    'noop': 0
}


def addTimeoutMsg(meta):
    """Build the compact 'display_content' status string for each row.

    The string is three flag characters (mark / vika status / raw-data
    availability) optionally followed by the l90 latency, e.g. '--- 123'.
    Also tallies marks and flag combinations into the module-level `cnt` dict.
    """
    def extractT(row):
        parts = []

        mark = row['mark']
        if mark == 'repair':
            parts.append('x')
            cnt['needRepair'] += 1
        elif mark == 'timeout':
            parts.append('t')
            cnt['noop'] += 1
        else:
            parts.append('-')

        status = row['vkt.status']
        if status == 'SUCCESS':
            parts.append('-')
        elif status == 'FAILURE':
            parts.append('x')
        else:
            parts.append('?')

        l90 = row['al_l90_t']
        if np.isnan(l90):
            parts.append('n')
        else:
            parts.append('-')
            parts.append(" {:.0f}".format(l90))

        ret = ''.join(parts)
        flag = ret[:3]
        cnt[flag] = cnt.get(flag, 0) + 1
        return ret

    meta[['display_content']] = meta.apply(extractT, axis=1, result_type="expand")
    return meta


# Restrict the dashboard to the full-size ('.all') runs before display.
meta = addTimeoutMsg(meta[meta['tp_size'] == '.all'])


# expandData = meta.explode('al.raw').reset_index()
# st.write(meta.columns)
# After scaling to K/M units and rounding, keep one decimal place when the
# integer part is < 10.
def human_format(num):
    """Format *num* with a metric suffix (K/M/G/T/P) at 2-3 significant digits.

    If the scaled value is < 10 one decimal is kept ('1.5K'), otherwise it is
    rounded to an integer ('15K').  Non-positive input, on which the original
    math.log10 would raise, renders as '0'; the suffix index is clamped so
    values < 1 or >= 1000**6 no longer pick a wrong suffix.
    """
    suffixes = ['', 'K', 'M', 'G', 'T', 'P']
    if num <= 0:
        return '0'
    # clamp so sub-unit values don't index backwards and huge values don't overrun
    m = min(max(int(math.log10(num) // 3), 0), len(suffixes) - 1)
    zheng = num / 1000.0 ** m
    if zheng < 10:
        return f'{zheng:.1f}{suffixes[m]}'
    else:
        return f'{zheng:.0f}{suffixes[m]}'


def mark2val(row):
    """Map a row to [plot value, label]; failed/timed-out runs get a sentinel bar."""
    if row['mark'] == '-':
        t = row['al_l90_t']
        label = 'None' if np.isnan(t) else human_format(t)
        return [t, label]
    # sentinel height keeps error/timeout bars visible on the log-scale chart
    return [1_000_000, 'ERROR' if row['mark'] == 'repair' else 'TIMEOUT']


# Replace failed runs' l90 with a sentinel value/label, then drop rows that
# still have no usable measurement.
meta[['al_l90_t', 'label2show']] = meta.apply(mark2val, axis=1, result_type="expand")
meta = meta[~np.isnan(meta['al_l90_t'])]
# with col1:
#     tabList = st.tabs(datasetList)
#     with tabList[0]:
#         groupByDataset('energy', meta)
#     with tabList[1]:
#         groupByDataset('traffic', meta)
#     with tabList[2]:
#         groupByDataset('syn', meta)
#
# with col2:
#     st.write(cnt)
#     pass
#
#
# tabs = st.tabs(readWriteTests)
# with tabs[0]:
#     groupByTest('ehistory', meta)
#     my_progress.progress(15)
# with tabs[1]:
#     groupByTest('snapshot', meta)
#     my_progress.progress(30)
# with tabs[2]:
#     groupByTest('aggmax', meta)
#     my_progress.progress(45)
# with tabs[3]:
#     groupByTest('etpc', meta)
#     my_progress.progress(60)
# with tabs[4]:
#     groupByTest('reachable', meta)
#     my_progress.progress(75)
# with tabs[5]:
#     groupByTest('append', meta)
#     my_progress.progress(88)
# with tabs[6]:
#     groupByTest('update', meta)
#     my_progress.progress(100)

# colDBs = st.tabs(dbList)
# for i, col in enumerate(colDBs):
#     db = dbList[i]
#     with col:
#         groupByDB(db, meta)
#
# pattern_scale = ({
#    'domain': ['timeout', 'repair', '-'],
#    'range': ['url(#diagonal_up_left)', 'url(#cross_hatch)', 'url(#diagonal_up_right)']  #
# })
# Fixed per-db colors so the palette is stable regardless of which dbs appear.
colorMap = {'PG': '#ffaa6c', 'MA': '#ee8683', 'N1': '#c27294', 'N2': '#cba451', 'TGK': '#0081cf', 'TGC': '#4e78cf',
            'TGL': '#00a4de', 'TGKI': '#00c2d0', 'TGLI': '#00dbad', 'TGS': '#96ee86'}
meta['color'] = meta['db'].apply(lambda db: colorMap[db])
# Legend-bound selection: clicking a db in the legend dims the others.
# NOTE(review): selection_multi/add_selection are deprecated in altair 5 —
# confirm the pinned altair version before upgrading.
selection = alt.selection_multi(fields=['db'], bind='legend')
bar = alt.Chart(meta).mark_bar().encode(
    y=alt.Y("al_l90_t",
            axis=alt.Axis(format="~s"),
            title=None,
            scale=alt.Scale(type="symlog")),  #
    x=alt.X("db", title=None),
    color=alt.Color('color', scale=None),
    # fill=alt.Fill('mark:N', scale=pattern_scale),
    tooltip=["db", "lct_ct", "al_l90_t", "al_succ_cnt", "al_qcnt"],
    row="test",
    column="dataset",
    opacity=alt.condition(selection, alt.value(1), alt.value(0.1))
).properties(
    width=160,
    height=100
).add_selection(selection)
# text = bar.mark_text(
#     # align='center',
#     baseline='bottom'
# ).encode(
#     text=alt.Text('al_l90_t', format=',.2r')
# )  # .transform_calculate(label='datum. + ""')
# c = alt.layer(bar, text)  # .facet(
# c = bar.facet(
#     facet='dataset',
#     columns=3
# ).resolve_scale(
#     y='independent'
# )
st.altair_chart(bar)

# Plotly version of the same overview: one bar per db, faceted test × dataset,
# log-scaled y with the human-readable label printed above each bar.
ffg = px.bar(meta, y="al_l90_t", x="db", color="db", pattern_shape="mark",
             facet_row="test",
             facet_col="dataset",
             log_y=True,
             text="label2show",
             hover_data=["db", "lct_ct", "al_l90_t", "al_succ_cnt", "al_qcnt"],
             color_discrete_sequence=['#ffaa6c', '#ee8683', '#c27294', '#cba451', '#0081cf', '#4e78cf', '#00a4de',
                                      '#00c2d0', '#00dbad', '#96ee86'],
             )
ffg.update_traces(width=1.0, textfont_size=12, textangle=0, textposition="outside", cliponaxis=False)
ffg.update_layout(barmode='group', bargap=0.0, bargroupgap=0.0)
fgg = ffg.update_yaxes(matches=None, showticklabels=True)
st.plotly_chart(fgg)  # , use_container_width=True
# g = sns.FacetGrid(meta, col="dataset", row="test", margin_titles=True)
# g.map_dataframe(
# sns.swarmplot(x='db', y='al.raw', data=data, color='blue', size=4)
# sns.stripplot(x='db', y='al.raw', data=data, color='blue', alpha=0.3, size=4)
# sns.violinplot(x="db", y="al.raw", data=data, cut=0, inner='stick')
# st.pyplot(plt)
# st.dataframe(meta, use_container_width=True)
meta.sort_values(by=['tp_size', 'dataset', 'test'], inplace=True, ascending=True)
meta = meta[['db', 'tp_size', 'dataset', 'test', 'lct_ct', 'al_l90_t']]
# Join every db's rows with TGK's on the same (tp_size, dataset, test) so the
# styler below can compare them (al_l90_t_x = row's db, al_l90_t_y = TGK).
tgk = meta[meta['db'] == 'TGK']
tgk = tgk[['tp_size', 'dataset', 'test', 'al_l90_t']]
joined = pd.merge(meta, tgk, how='left',
                  left_on=['tp_size', 'dataset', 'test'],
                  right_on=['tp_size', 'dataset', 'test'])


def smallerThanTGK(df):
    """Styler callback: highlight the 'db' cell of relational systems
    ('PG'/'MA'/'N1'/'N2') whose l90 beat TGK's on the same combination."""
    highlight = 'background-color: #0ff'
    faster = df['al_l90_t_x'] < df['al_l90_t_y']
    relational = df['db'].isin(['PG', 'MA', 'N1', 'N2'])
    # start from an all-empty style frame with the same shape as the data
    styles = pd.DataFrame('', index=df.index, columns=df.columns)
    styles.loc[faster & relational, 'db'] = highlight
    return styles


# Highlight cells where a relational DB beat TGK, then render the table.
fj = joined.style.apply(smallerThanTGK, axis=None)
st.dataframe(fj)
# ret = AgGrid(joined, update_mode=GridUpdateMode.MANUAL)
