import pandas as pd
from datetime import datetime, timedelta, date,timezone
import yaml
import os
from pymongo import MongoClient 
import random
import numpy as np
import sys

# Mongo server addresses.
# SECURITY NOTE(review): server IPs and the eCDUsvr credentials were
# hardcoded in this file (and a root password for the management-network
# host was recorded in a source comment -- it has been removed). Move all
# of these to environment variables / a secret store and rotate them.
# The environment overrides below are backward compatible: with no
# environment set, the original defaults are used unchanged.
mongo_ip = os.environ.get('ECDU_MONGO_IP', '10.0.0.233')    # ICS-network IP (decode side)

mongo_ip2 = os.environ.get('ECDU_MONGO_IP2', '10.0.0.152')  # management-network IP (encode side)

# Full connection URI; may be overridden wholesale via ECDU_MONGO_URI.
SVR_MONGO_URI = os.environ.get(
    'ECDU_MONGO_URI',
    u'mongodb://eCDUsvr:eCDUsvr2020@{0}:27017'.format(mongo_ip))
SVR_DB_NAME = 'eCDU'
# pd.set_option('display.max_rows', None)
# pd.set_option('display.max_columns', None)
localtz = 'Asia/Shanghai'


class MongoAccess():
    """Export client for the eCDU MongoDB server.

    Wraps a shared pymongo ``MongoClient`` and provides helpers that read
    collections into pandas DataFrames, convert stored UTC timestamps to
    local time (``localtz``), and dump results as CSV/HDF5 files under
    ``self.pathName`` (Windows-style ``'\\'`` path separators are used
    throughout, so this is effectively Windows-only).

    NOTE(review): several attributes used by the methods below are never
    assigned in ``__init__`` -- ``self.cfg``, ``self.qiluPHDSymbol``,
    ``self.qiluPHDProcSymbol``, ``self.pathName``, ``self.startTime``,
    ``self.endTime``. Callers must set them (or call ``run`` first, which
    sets the time window and path). Confirm against the calling code.
    """
    def __init__(self):
        # Single shared client for all queries; the URI embeds credentials.
        self.client = MongoClient(SVR_MONGO_URI)
    def _tz_convert(self, start, end):
        # Normalise a (start, end) pair of pandas Timestamps to UTC.
        # Naive timestamps are assumed to be wall-clock in ``localtz`` and
        # are localised before conversion.
        # NOTE(review): ``start.tz != 'UTC'`` compares a tzinfo object with
        # the string 'UTC', so the guard is effectively always true; the
        # redundant ``astimezone('UTC')`` on an already-UTC value is
        # harmless (idempotent) but the condition does not do what it
        # appears to.
        if start.tz is None:
            start = start.tz_localize(localtz)
                
        if start.tz != 'UTC':
            start = start.astimezone('UTC')

        if end.tz is None:
            end = end.tz_localize(localtz)
        
        if end.tz != 'UTC':
            end = end.astimezone('UTC')
        return start, end

    def read_data(self, collection, start, end):
        """Read rows of *collection* whose 'time' field lies in [start, end].

        Returns a DataFrame indexed by 'time' (converted to local time), or
        ``None`` on any failure.

        NOTE(review): the bare ``except`` swallows every error, including
        the fact that ``utc_df2local`` is not defined anywhere in this
        file -- if that helper is missing at runtime, the resulting
        NameError is silently converted into a ``None`` return. Confirm it
        is provided elsewhere (e.g. via a star-import).
        """
        start, end = self._tz_convert(start, end)
        try:
            find_str = {'$and': [{'time': {'$gte': start}}, {'time': {'$lte': end}}]}
            df = self.loadDataFrame(SVR_DB_NAME, collection, find_str)
            if df is not None and not df.empty:
                df.set_index('time', inplace=True)
                if df.index.tz is None:
                    df = utc_df2local(df)
        except:
            df = None
        return df

    def loadDataFrame(self, dbname, collection, find, incId=False, sort=None):
        """Run ``find`` on ``dbname.collection`` and return a DataFrame.

        When ``incId`` is False the projection ``{"_id": False}`` drops the
        Mongo ``_id`` column (the value of ``incId`` itself is reused as
        the projection flag); when True, ``_id`` is included.
        An empty result produces an empty DataFrame, not ``None``.
        """
        db = self.client[dbname]
        coll = db[collection]
        if incId:
            cursor = coll.find(find)
        else:
            cursor = coll.find(find,{"_id":incId})
        if sort is not None:
            cursor = cursor.sort(sort)
        df =  pd.DataFrame(list(cursor))
        return df
        
    def loadDataFramePart(self, dbname, collection, query, projection, sort=None):
        """Like ``loadDataFrame`` but with an explicit caller-supplied projection."""
        db = self.client[dbname]
        coll = db[collection]
        cursor = coll.find(query,projection)
        if sort is not None:
            cursor = cursor.sort(sort)
        df =  pd.DataFrame(list(cursor))
        return df
        
    def load_docs(self, dbname, collection, find, filed=None, sort=None):
        """Return the matching documents as a list of dicts, or ``None`` on error.

        ``filed`` (sic -- presumably 'field') is passed to ``find`` as the
        projection. Errors are swallowed; the commented-out logging below
        suggests proper error reporting was intended but never wired up.
        """
        try:
            db = self.client[dbname]  
            coll = db[collection]
            cursor = coll.find(find, filed)
            if sort is not None:
                cursor = cursor.sort(sort)
            return list(cursor)
        except Exception as ex:
            # ex_type, ex_val, ex_stack = sys.exc_info()
            # logger.error(ex_type)
            # logger.error(ex_val)
            # for stack in traceback.extract_tb(ex_stack):
            # logger.error(stack)
            return None

    def delete_docs(self, dbname, collection, find):
        """Delete all documents matching ``find``; returns the pymongo
        ``DeleteResult`` or ``None`` on error (errors are swallowed)."""
        try:
            db = self.client[dbname]
            coll = db[collection]
            return coll.delete_many(find)
        except Exception as ex:
            # ex_type, ex_val, ex_stack = sys.exc_info()
            # logger.error(ex_type)
            # logger.error(ex_val)
            # for stack in traceback.extract_tb(ex_stack):
            #     logger.error(stack)
            return None

    def insert_docs(self, dbname, collection, lddoc):
        """Insert a list of dicts; returns the inserted ids or ``None`` on
        error (errors are swallowed)."""
        try:
            db = self.client[dbname]
            coll = db[collection]
            return coll.insert_many(lddoc).inserted_ids
        except Exception as ex:
            # ex_type, ex_val, ex_stack = sys.exc_info()
            # logger.error(ex_type)
            # logger.error(ex_val)
            # for stack in traceback.extract_tb(ex_stack):
            #     logger.error(stack)
            return None

    def saveDataFrame(self, df, dbname, collection, deldata=False):
        """Write ``df`` rows into ``dbname.collection`` as documents.

        When ``deldata`` is True the collection is emptied first (the
        delete happens even if ``df`` is empty and nothing is re-inserted).
        """
        db = self.client[dbname]
        coll = db[collection]
        if deldata:
            coll.delete_many({})
        
        if df.shape[0] > 0:
            coll.insert_many(df.to_dict('records'))
        
    def __delete(self):
        # NOTE(review): this is a name-mangled private method, NOT the
        # destructor -- Python's destructor hook is ``__del__``. As written
        # this is never called by the interpreter; confirm whether cleanup
        # (e.g. closing self.client) was intended here.
        pass
        # self.dbcSvr.disconnect()
        
    def readLIMS(self, startTime, endTime):
        """Read d_LIMS rows with ``insertODSTime`` in (startTime, endTime]
        and convert the two timestamp columns from UTC to local time.

        NOTE(review): unlike ``read_data``, the query bounds are passed
        through unconverted -- callers are expected to supply values
        comparable to what Mongo stores (presumably naive UTC). Confirm.
        """
        LIMSData = self.loadDataFrame(SVR_DB_NAME, 'd_LIMS',
                                      {"insertODSTime": {"$gt": startTime, "$lte": endTime}})
        if LIMSData is not None and not LIMSData.empty:
            LIMSData['samplingTime'] = LIMSData.set_index('samplingTime').index.tz_localize(
                timezone.utc).tz_convert(localtz)
            LIMSData['insertODSTime'] = LIMSData.set_index('insertODSTime').index.tz_localize(
                timezone.utc).tz_convert(localtz)
        return LIMSData 
                
                
                
        
    def run(self, pathName):
        """Main export pass for one day.

        ``pathName`` must start with a 'YYYYMMDD' date, which defines the
        end of the export window; window length and end-hour come from
        ``self.cfg['PHDTimeDelta']`` / ``self.cfg['PHDDataHour']``.
        Dumps PHD, APC, processed-PHD, LIMS, GUI, crude-change, bound and
        soft-sensor data as CSV/HDF files into the newly created directory.

        NOTE(review): ``os.makedirs`` raises if the directory already
        exists, and ``self.cfg`` / ``self.qiluPHDSymbol`` /
        ``self.qiluPHDProcSymbol`` must have been set by the caller.
        """
        self.pathName = pathName
        # now = datetime.now()
        # self.pathName = now.strftime('%Y%m%d%H%M')
        os.makedirs(self.pathName)
        # print('s1')
        now = datetime.now()
        self.endTime = now.today()
        
        # Parse the date out of the directory name and anchor the window end
        # at midnight of that day plus the configured PHDDataHour offset.
        year = int(pathName[0:4])
        month = int(pathName[4:6])
        day = int(pathName[6:8])
        self.endTime = self.endTime.replace(year=year, month=month, day=day,hour=0, minute=0, second=0,microsecond=0) + timedelta(hours=self.cfg['PHDDataHour'])
        self.endTime = pd.to_datetime(self.endTime)
        delta = timedelta(hours=self.cfg['PHDTimeDelta'])
        self.startTime = self.endTime-delta
        self.startTime, self.endTime = self._tz_convert(self.startTime, self.endTime)
                
        PHDData = None
        try:
            PHDData = self.read_data(self.qiluPHDSymbol, self.startTime, self.endTime)
        except:
            pass

        if PHDData is not None and not PHDData.empty:
            PHDData.sort_index(axis=1, inplace=True)
            PHDData.to_hdf(self.pathName + '\\PHD.h5',key='PHDData')
            PHDData.to_csv(self.pathName + '\\PHD.csv')
            
        PHDData_APC = None
        try:
            PHDData_APC = self.read_data('qiluCDUphd_APC', self.startTime, self.endTime)
        except:
            pass

        if PHDData_APC is not None and not PHDData_APC.empty:
            PHDData_APC.sort_index(axis=1, inplace=True)
            PHDData_APC.to_hdf(self.pathName + '\\PHD_APC.h5',key='PHDData')
            PHDData_APC.to_csv(self.pathName + '\\PHD_APC.csv')
            
        

        ProcPHDData = None
        try:
            # ProcPHDData = self.libProcPHD.read(self.qiluPHDProcSymbol, DateRange(self.startTime, self.endTime))
            ProcPHDData = self.read_data(self.qiluPHDProcSymbol, self.startTime, self.endTime)
        except:
            pass
        if ProcPHDData is not None and not ProcPHDData.empty:
            ProcPHDData.sort_index(axis=1, inplace=True)
            ProcPHDData.to_hdf(self.pathName + '\\ProcPHD.h5',key='PHDData')
            ProcPHDData.to_csv(self.pathName + '\\ProcPHD.csv')
            
        # Mirror PHD / processed-PHD into the '*_curday' site collections.
        try:
            ProcPHDData = self.export_phdandprocphd_siteset(PHDData, ProcPHDData)
        except:
            pass
            
        latestData = None
        try:
            # ProcPHDData = self.libProcPHD.read(self.qiluPHDProcSymbol, DateRange(self.startTime, self.endTime))
            latestData = self.loadDataFrame(SVR_DB_NAME, 'd_latestdata', {})
        except:
            pass
        if latestData is not None and not latestData.empty:
            # ProcPHDData.sort_index(axis=1, inplace=True)
            # ProcPHDData.to_hdf(self.pathName + '\\ProcPHD.h5',key='PHDData')
            latestData.to_csv(self.pathName + '\\d_latestdata.csv')



        LIMSData = self.loadDataFrame(SVR_DB_NAME, 'd_LIMS',
                                      {"insertODSTime": {"$gt": self.startTime, "$lte": self.endTime}})
        if LIMSData is not None and not LIMSData.empty:
            LIMSData['samplingTime'] = LIMSData.set_index('samplingTime').index.tz_localize(
                timezone.utc).tz_convert(localtz)
            LIMSData['insertODSTime'] = LIMSData.set_index('insertODSTime').index.tz_localize(
                timezone.utc).tz_convert(localtz)
            LIMSData.to_csv(self.pathName + '\\LIMS.csv', encoding='utf_8_sig')
        procLIMSStartTime = self.startTime - delta  # double delta to avoid the samplingTime delay
        ProcessLIMSData = self.loadDataFrame(SVR_DB_NAME, 'd_ProcessLIMS',
                                             {"samplingTime": {"$gt": procLIMSStartTime, "$lte": self.endTime}})
        if not ProcessLIMSData.empty:
            ProcessLIMSData['samplingTime'] = ProcessLIMSData.set_index('samplingTime').index.tz_localize(
                timezone.utc).tz_convert(localtz)
            # Flatten the per-row 'data' dicts into tag -> list-of-values.
            # NOTE(review): this assumes every row's 'data' dict has the same
            # key set; otherwise the column lists end up with unequal lengths
            # and the DataFrame construction below raises (silently swallowed).
            dataDict = {}
            for idx in ProcessLIMSData.index:
                for tag, val in ProcessLIMSData.loc[idx,'data'].items(): 
                    if tag not in dataDict:
                        dataDict[tag] = [val]
                    else:
                        dataDict[tag].append(val)
            try:
                ProcessLIMSData_save = pd.DataFrame(data=dataDict, index=ProcessLIMSData['samplingTime'])
                # ProcessLIMSData_save.to_hdf(self.pathName + '\\ProcessLIMS.h5',key='ProcessLIMSData')
                ProcessLIMSData_save.to_csv(self.pathName + '\\ProcessLIMS.csv',encoding='utf_8_sig')
            except:
                pass
            
        
        ssiGUI = self.loadDataFrame(SVR_DB_NAME,'m_tag_time_chart',{"time": {"$gt": self.startTime, "$lte": self.endTime}})
        
        if not ssiGUI.empty:
            ssiGUI['time'] = ssiGUI.set_index('time').index.tz_localize(timezone.utc).tz_convert(localtz)
            # ssiGUI.to_hdf(self.pathName + '\\ssiGUI.h5',key='ssiGUIData')
            ssiGUI.to_csv(self.pathName + '\\ssiGUI.csv',encoding='utf_8_sig')
            
        crude_changes = self.loadDataFrame(SVR_DB_NAME, 'detected_crude_changes',
                                           {"timestamp": {"$gt": self.startTime, "$lte": self.endTime}})
        if not crude_changes.empty:
            crude_changes['timestamp'] = crude_changes.set_index('timestamp').index.tz_localize(
                timezone.utc).tz_convert(localtz)
            crude_changes.to_hdf(self.pathName + '\\crude_changes.h5', key='crudeChangeData')
            crude_changes.to_csv(self.pathName + '\\crude_changes.csv', encoding='utf_8_sig')
        
        # Bound changes are exported for the trailing 90 days, not just the window.
        # changedBounds = self.loadDataFrame(SVR_DB_NAME,'auto_range_bound_changes',{"date": {"$gt": self.startTime, "$lte": self.endTime}})
        startTimeTemp = self.endTime - timedelta(days=90) 
        changedBounds = self.loadDataFrame(SVR_DB_NAME,'auto_range_bound_changes',{"date": {"$gt": startTimeTemp, "$lte": self.endTime}})

        if not changedBounds.empty: 
            changedBounds.to_csv(self.pathName + '\\auto_range_bound_changes.csv',encoding='utf_8_sig')
        
        # Only export the config doc when exactly one exists (singleton collection).
        bDataprocessCfg = self.load_docs(SVR_DB_NAME,'b_dataprocess_config',{})
        if len(bDataprocessCfg) == 1:
            bDataprocessCfg = bDataprocessCfg[0]
            outRemovdedRange = bDataprocessCfg['out of range removal']
            outRemovdedRange = pd.DataFrame(data=outRemovdedRange)
            outRemovdedRange.to_csv(self.pathName + '\\b_dataprocess_config.csv',encoding='utf_8_sig')
            
        symbol = 'qiluCDUsoftsensor'
        VTagData = None
        try:
            # VTagData = self.libVTag.read(symbol, DateRange(self.startTime, self.endTime))
            VTagData = self.read_data(symbol, self.startTime, self.endTime)
        except:
            pass

        if VTagData is not None and not VTagData.empty:
            # VTagData.to_hdf(self.pathName + '\\CDU3VTag.h5', key='VTag')
            VTagData.to_csv(self.pathName + '\\CDU3VTag.csv')

    def load_vuser2_datas(self, collections=None, begintime=None, endtime=None, dbcSvr=None):
        """Load the virtual-user-2 collections as local-time-indexed DataFrames.

        Open begin/end times are supported (each side optional). Returns a
        list parallel to ``collections``; an entry is ``None`` when the
        collection lacks a usable 'time' column.
        NOTE(review): ``dbcSvr`` is accepted but never used.
        """
        if collections is None:
            collections = ['d_smooth_result', 'd_rtsimu_inputs', 'd_rtsimu_result', 'd_rtsimu_out_cvs',
                           'd_rtsimu_out_mvs', 'd_vu2_info', 'd_rtsimu_info']
        dfs = []
        if begintime is None and endtime is None:
            findstr = {}
        elif begintime is None:
            findstr = {'time': {'$lte': endtime}}
        elif endtime is None:
            findstr = {'time': {'$gte': begintime}}
        else:
            findstr = {'$and': [{'time': {'$lte': endtime}}, {'time': {'$gte': begintime}}]}
            
        for colle in collections:
            # df = self.loadDataFrame(SVR_DB_NAME, colle, findstr, incId=True, sort=[('startTime',1)])
            df = self.loadDataFrame(SVR_DB_NAME, colle, findstr, incId=True)
            try:
                df['time'] = df.set_index('time').index.tz_localize(timezone.utc).tz_convert(localtz)
                df.set_index('time', inplace=True)
            except:
                df = None
            dfs.append(df)
            print(colle)
            print(df)
            # print(df.index)
            # print(df.columns)
        return dfs
        
    def load_dcase_result_datas(self):
        """Export steady-state-identification (SSI) cases and their
        reconciliation/optimisation results for the current window.

        Produces ssi_case.csv, recon_opt.csv and ssi_status.csv under
        ``self.pathName``; returns early (exporting nothing) when no SSI
        data exists. Relies on ``self.startTime`` / ``self.endTime``
        already being set (e.g. by ``run``).
        """
        if self.startTime is None and self.endTime is None:
            findstr_ssi = {}
            findstr_notssi = {}
        elif self.startTime is None:
            findstr_ssi = {'end_time': {'$lte': self.endTime}}
            findstr_notssi = {'end_time': {'$lte': self.endTime}}
        elif self.endTime is None:
            findstr_ssi = {'start_time': {'$gte': self.startTime}}
            findstr_notssi = {'start_time': {'$gte': self.startTime}}
        else:
            findstr_ssi = {'$and': [{'end_time': {'$lte': self.endTime}}, {'end_time': {'$gt': self.startTime}}]}
            findstr_notssi = {'$and': [{'end_time': {'$lte': self.endTime}}, {'end_time': {'$gt': self.startTime}}]}

        s_df = self.loadDataFrame(SVR_DB_NAME, 'd_ssidata',  findstr_ssi, False, [('start_time',1)])
        sshead_df = self.loadDataFrame(SVR_DB_NAME, 'd_ssiheadresult',  findstr_ssi, False, [('start_time',1)])
        # print(s_df.shape)
        # print(sshead_df.shape)
        
        # Build a combined status frame: real cases get _id=1, head results
        # carry their 'status' as _id and a synthetic 'C_<n>_<status>' caseID.
        ssi_df = None
        if s_df is not None and not s_df.empty:
            ssi_df = s_df.loc[:, ['start_time', 'end_time', 'caseID']]
            ssi_df.set_index('start_time', inplace=True)
            ssi_df['_id'] = 1
            ssi_df = ssi_df.loc[:, ['_id', 'end_time', 'caseID']]
            ssi_status_df = ssi_df

        if sshead_df is not None and not sshead_df.empty:
            sshead_df['num'] = range(0, len(sshead_df))
            sshead_df['caseID'] = ''
            for sshead_index in sshead_df.index:
                sshead_df.loc[sshead_index, 'caseID'] = 'C_{}_{}'.format(sshead_df.loc[sshead_index, 'num'],
                                                                         sshead_df.loc[sshead_index, 'status'])

            sshead_status_df = sshead_df
            sshead_status_df.set_index('start_time', inplace=True)
            sshead_status_df['_id'] = sshead_status_df['status']
            sshead_status_df = sshead_status_df.loc[:, ['_id', 'end_time', 'caseID']]
            if ssi_df is not None and not ssi_df.empty:
                ssi_status_df = pd.concat([ssi_df, sshead_status_df], axis=0)

        

        # ssi_status_df is only bound when s_df is non-empty, so this guard
        # must come first to avoid an UnboundLocalError below.
        if s_df is None or s_df.empty:
            return
            
        if ssi_status_df is None or ssi_status_df.empty:
            return
            
        ssi_status_df.reset_index(inplace=True)
        ssi_status_df.set_index('caseID', inplace=True)
        ssi_status_df = ssi_status_df[['start_time', 'end_time', '_id']]
        ssi_status_df['recon'] = np.nan
        ssi_status_df['opt'] = np.nan

        # Drop bookkeeping columns before exporting the case table.
        cols = []
        s_df.set_index('caseID', inplace=True)
        for col in s_df.columns:
            if col in ['app', '_id', 'equip', 'user']:
                continue
            cols.append(col)
        s_df = s_df[cols]

        s_df['start_time'] = s_df.set_index('start_time').index.tz_localize(timezone.utc).tz_convert(localtz)
        s_df['end_time'] = s_df.set_index('end_time').index.tz_localize(timezone.utc).tz_convert(localtz)
        s_df.to_csv(self.pathName+"\\ssi_case.csv")

        caseids = list(s_df.index)
        # docs = self.load_docs(SVR_DB_NAME, 'd_case', {'_id': {'$in': caseids}}, {'_id': 1, 'data': 1, 'recon': 1, 'opt': 1}, [('startTime',1)])
        #docs = self.load_docs(SVR_DB_NAME, 'd_case', {'_id': {'$in': caseids}}, {}, [('startTime', 1)])
        docs = self.load_docs(SVR_DB_NAME, 'd_case', {'_id': {'$in': caseids}}, None, [('startTime', 1)])
        try:
            self.export_case_siteset(docs, caseids)
        except:
            pass

        # try:
            # self.copy_files(r'\\192.168.0.2\wd', self.pathName, caseids)
        # except:
            # pass
            
        # Collect per-case recon/opt tag vectors, skipping cases whose
        # recon status is 5 or 12 (presumably failed runs -- confirm).
        df_list = []
        tags = set()
        if docs is not None:
            for doc in docs:
                caseID = doc['_id']
                if 'data' not in doc.keys():
                    continue
                data_dir = doc['data']
                
                if 'recon' in doc.keys() and caseID in ssi_status_df.index:
                    ssi_status_df.loc[caseID, 'recon'] = doc['recon']
                if 'opt' in doc.keys() and caseID in ssi_status_df.index:
                    ssi_status_df.loc[caseID, 'opt'] = doc['opt']
                    
                if 'recon' in doc.keys():
                    if int(doc['recon']) in [5,12]:
                        continue
                else:
                    continue
                    
                df = pd.DataFrame().from_dict(data_dir, orient='index')
                new_cols = []
                # for prop in ['avg', 'reconLB', 'reconUB', 'xr', 'optLB', 'optUB', 'xo']:
                for prop in ['avg', 'reconLB', 'reconUB', 'xr', 'xrraw', 'xries', 'optLB', 'optUB', 'xo', 'xoies',
                             'xoraw']:
                    if prop in df.columns:
                        df['{}_{}'.format(caseID, prop)] = df[prop]
                        new_cols.append('{}_{}'.format(caseID, prop))
                    else:
                        pass
                        df['{}_{}'.format(caseID, prop)] = np.nan
                df = df[new_cols]
                df_list.append(df)
                tags = tags | set(df.index.values)

        # Align all per-case frames onto the union of tags before concatenating.
        new_df_list = []
        for df in df_list:
            new_df = df.reindex(list(tags), axis='index')
            new_df_list.append(new_df)
        if len(new_df_list) > 0:
            all_df = pd.concat(new_df_list, join='outer', axis=1)

            # print(all_df.shape)
            if all_df is not None:
                all_df.to_csv(self.pathName+"\\recon_opt.csv")

        # print(ssi_status_df.shape)
        if ssi_status_df is not None:
            ssi_status_df.sort_values('end_time', inplace=True)
            # timezone convert way reference https://www.jianshu.com/p/ab7514dc6190
            # ssi_status_df["start_time"] = ssi_status_df["start_time"].astype('datetime64[ns, Asia/Shanghai]')
            # ssi_status_df["end_time"] = ssi_status_df["end_time"].astype('datetime64[ns, Asia/Shanghai]')
            # or below timezone convert
            ssi_status_df["start_time"] = ssi_status_df.set_index('start_time').index.tz_localize(
                timezone.utc).tz_convert(localtz)
            ssi_status_df["end_time"] = ssi_status_df.set_index('end_time').index.tz_localize(
                timezone.utc).tz_convert(localtz)
                
            print("ssi_status_df:\n", ssi_status_df)
            ssi_status_df.to_csv(self.pathName+"\\ssi_status.csv")
        print("end load_dcase_result_datas")

    def export_case_siteset(self, docs, caseids):
        """Replace the '*_curday' site collections with the given case docs
        and their matching d_ssidata rows (``_id`` stripped so Mongo
        assigns fresh ids on insert)."""
        ssi_docs = self.load_docs(SVR_DB_NAME, 'd_ssidata', {'caseID': {'$in': caseids}})
        for ssi_doc in ssi_docs:
            del ssi_doc['_id']
        self.delete_docs(SVR_DB_NAME, 'd_case_curday', {})
        self.delete_docs(SVR_DB_NAME, 'd_ssidata_curday', {})

        self.insert_docs(SVR_DB_NAME, 'd_case_curday', docs)
        self.insert_docs(SVR_DB_NAME, 'd_ssidata_curday', ssi_docs)

    def export_phdandprocphd_siteset(self, phd_df, procphd_df):
        """Mirror the PHD and processed-PHD frames into their '*_curday'
        collections (emptied first), materialising the index as a 'time'
        column. NOTE(review): mutates the caller's DataFrames in place and
        returns ``None`` despite being assigned in ``run``."""
        phd_df['time'] = phd_df.index
        procphd_df['time'] = procphd_df.index
        self.saveDataFrame(phd_df, SVR_DB_NAME, 'd_phd_curday', True)
        self.saveDataFrame(procphd_df, SVR_DB_NAME, 'd_procphd_curday', True)

    def copy_files(self, src_dir, des_dir, caseIDs):
        """Copy the per-case GAMS artefacts from ``src_dir/<case>/gms`` to
        ``des_dir/wd/<case>``; missing sources are printed, copy failures
        ignored. NOTE(review): ``copy_g00files`` below is an exact
        duplicate of this method -- one of them should be removed."""
        from pathlib import Path
        import shutil
        for caseID in caseIDs:
            src_dirpath = Path(src_dir) / caseID / 'gms'
            des_dirpath = Path(des_dir) / 'wd' / caseID
            if not os.path.exists(des_dirpath):
                os.makedirs(des_dirpath)
            for filename in ['eCDU_Overall.g00',
                             'eCDU_DataRecon_Casedata.gdx',
                             'eCDU_DataRecon_Result.gdx',
                             'eCDU_DataReconcilation_Case.lst',
                             'RTDataRecon.g00',
                             'eCDU_Optimisation_Casedata.gdx',
                             'eCDU_Optimisation_Result.gdx',
                             'eCDU_Optimisation_Case.lst']:
                if os.path.exists(src_dirpath / filename):
                    try:
                        shutil.copyfile(src_dirpath / filename, des_dirpath / filename)
                    except:
                        pass
                else:
                    print(src_dirpath / filename)

        
    def copy_g00files(self, src_dir, des_dir, caseIDs):
        # NOTE(review): byte-for-byte duplicate of ``copy_files`` above.
        from pathlib import Path
        import shutil
        for caseID in caseIDs:
            src_dirpath = Path(src_dir) / caseID / 'gms'
            des_dirpath = Path(des_dir) / 'wd' / caseID
            if not os.path.exists(des_dirpath):
                os.makedirs(des_dirpath)
            for filename in ['eCDU_Overall.g00',
                             'eCDU_DataRecon_Casedata.gdx',
                             'eCDU_DataRecon_Result.gdx',
                             'eCDU_DataReconcilation_Case.lst',
                             'RTDataRecon.g00',
                             'eCDU_Optimisation_Casedata.gdx',
                             'eCDU_Optimisation_Result.gdx',
                             'eCDU_Optimisation_Case.lst']:
                if os.path.exists(src_dirpath / filename):
                    try:
                        shutil.copyfile(src_dirpath / filename, des_dirpath / filename)
                    except:
                        pass
                else:
                    print(src_dirpath / filename)

        
    def getdcaseTagData(self):
        """Export the raw 'tagData' of every d_case overlapping the window
        (cases fully inside it, or straddling either edge) to
        caseTagData.csv, transposed so tags are rows and cases columns."""
        findstr1 = {'$and': [{'endTime': {'$lte': self.endTime}}, {'startTime': {'$gt': self.startTime}}]}
        findstr2 = {'$and': [{'endTime': {'$gte': self.startTime}}, {'startTime': {'$lt': self.startTime}}]} 
        findstr3 = {'$and': [{'endTime': {'$gte': self.endTime}}, {'startTime': {'$lt': self.endTime}}]}
        findstr = {'$or': [findstr1, findstr2, findstr3]}
        
        docs = self.load_docs(SVR_DB_NAME, 'd_case', findstr, {'_id': 1, 'tagData': 1,'startTime': 1, 'endTime': 1})
        if docs is None:
            return
        
        # First pass: union of all tag names across cases.
        tagIndex = None
        for doc in docs:
            if 'tagData' not in doc.keys():
                continue
            tagData = doc['tagData']
            tagIndex_iter = pd.Index(tagData.keys())
            if tagIndex is None:
                tagIndex = tagIndex_iter
            else:
                tagIndex = tagIndex.union(tagIndex_iter)
                
        # Second pass: one column per case, padding missing tags with None.
        dataDict = {'startTime':[],'endTime':[]}
        caseIDList = []
        for doc in docs:
            if 'tagData' not in doc.keys():
                continue
            
            caseIDList.append(doc['_id'])
            caseStartTime = pd.Timestamp(doc['startTime'])
            caseStartTime = caseStartTime.tz_localize(timezone.utc).tz_convert(localtz)
            caseEndTime = pd.Timestamp(doc['endTime'])
            caseEndTime = caseEndTime.tz_localize(timezone.utc).tz_convert(localtz)
            
            if 'startTime' in doc.keys():
                dataDict['startTime'].append(caseStartTime)
            else:
                dataDict['startTime'].append(None)
                
            if 'endTime' in doc.keys():
                dataDict['endTime'].append(caseEndTime)
            else:
                dataDict['endTime'].append(None)
                
            tagData = doc['tagData']
            keys_iter = pd.Index(tagData.keys())
            diff_iter = tagIndex.difference(keys_iter)
            for idx in keys_iter:
                if idx in dataDict: 
                    dataDict[idx].append(tagData[idx])
                else:
                    dataDict[idx] = [tagData[idx]]
                    
            for idx in diff_iter:
                if idx in dataDict:
                    dataDict[idx].append(None)
                else:
                    dataDict[idx] = [None]
            
        if len(caseIDList) > 0:
            df = pd.DataFrame(data=dataDict,index=caseIDList).T
            df.to_csv(self.pathName+"\\caseTagData.csv")
            
    def getdcaseAvgData(self):
        """Export the per-case averages ('avg' of each tag in doc['data'])
        plus case-level metrics to caseDataAvg.csv, and dump the
        d_BoundsHist / d_usedAHist collections alongside."""
        # self.startTime = datetime(2019,5,12) 
        # self.endTime = datetime(2019,5,14)
        findstr1 = {'$and': [{'endTime': {'$lte': self.endTime}}, {'startTime': {'$gt': self.startTime}}]}
        findstr2 = {'$and': [{'endTime': {'$gte': self.startTime}}, {'startTime': {'$lt': self.startTime}}]} 
        findstr3 = {'$and': [{'endTime': {'$gte': self.endTime}}, {'startTime': {'$lt': self.endTime}}]}
        findstr = {'$or': [findstr1, findstr2, findstr3]}
        docs = self.load_docs(SVR_DB_NAME, 'd_case', findstr)#, {'_id': 1, 'data': 1,'startTime': 1, 'endTime': 1}
        if docs is None:
            return
        
        # Union of all tag names across cases (same scheme as getdcaseTagData).
        tagIndex = None
        for doc in docs:
            if 'data' not in doc.keys():
                continue
            dataCollect = doc['data']
            tagIndex_iter = pd.Index(dataCollect.keys())
            if tagIndex is None:
                tagIndex = tagIndex_iter
            else:
                tagIndex = tagIndex.union(tagIndex_iter)

        # Fixed case-level fields exported in addition to the tag averages.
        # ('reon_version' and 'ItterationCount' spellings match the stored
        # document keys -- do not "fix" them here.)
        casekeys = ['startTime', 'endTime', 'recon', 'opt', 'reon_version', 'recon_time_cost', 'reconObj', 'optObj',
                    'opt_time_cost', 'VDU_EnergyImbalance', 'Preflash_EnergyImbalance', 'ADU_EnergyImbalance',
                    'fTotalObj', 'recon_fEnthalpyObj', 'fPenalisedKeyOpCondPen', 'DataRecCPU', 'InitCPU',
                    'fInitConvergenceObj', 'ItterationCount', 'fInitObjSmallProgress', 'opt_fEnthalpyObj', 'fOptObj',
                    'fKeyOpParaChgPen', 'RTOCPU', 'LocalOptimalEscapeCPU']

        # dataDict = {'startTime':[],'endTime':[]}
        dataDict = {}
        for casekey in casekeys:
            dataDict[casekey] = []
            
        
        
        caseIDList = []
        for doc in docs:
            
            if 'data' not in doc.keys():
                continue
            
            caseIDList.append(doc['_id'])
            caseStartTime = pd.Timestamp(doc['startTime'])
            caseStartTime = caseStartTime.tz_localize(timezone.utc).tz_convert(localtz)
            doc['startTime'] = caseStartTime 
            caseEndTime = pd.Timestamp(doc['endTime'])
            caseEndTime = caseEndTime.tz_localize(timezone.utc).tz_convert(localtz)
            doc['endTime'] = caseEndTime
            

            for casekey in casekeys:
                if casekey in doc.keys():
                    dataDict[casekey].append(doc[casekey])
                else:
                    dataDict[casekey].append(None)
                    
            
            # if 'startTime' in doc.keys():
                # dataDict['startTime'].append(caseStartTime)
            # else:
                # dataDict['startTime'].append(None)
                
            # if 'endTime' in doc.keys():
                # dataDict['endTime'].append(caseEndTime)
            # else:
                # dataDict['endTime'].append(None)
                
            dataCollect = doc['data']
            keys_iter = pd.Index(dataCollect.keys())
            diff_iter = tagIndex.difference(keys_iter)
            for idx in keys_iter:
                if idx in dataDict:
                    if 'avg' in dataCollect[idx]:  
                        dataDict[idx].append(dataCollect[idx]['avg'])
                    else:
                        dataDict[idx].append(None)
                else:
                    if 'avg' in dataCollect[idx]: 
                        dataDict[idx] = [dataCollect[idx]['avg']]
                    else:
                        dataDict[idx] =  [None]
                    
            for idx in diff_iter:
                if idx in dataDict:
                    dataDict[idx].append(None)
                else:
                    dataDict[idx] = [None]
            
        if len(caseIDList) > 0:
            df = pd.DataFrame(data=dataDict,index=caseIDList).T
            df.to_csv(self.pathName+"\\caseDataAvg.csv") 

        # NOTE(review): this second load of d_case is unused (dead query).
        docs = self.load_docs(SVR_DB_NAME, 'd_case', findstr)#, {'_id': 1, 'data': 1,'startTime': 1, 'endTime': 1}
        
        df_systemErr = self.loadDataFrame(SVR_DB_NAME, 'd_BoundsHist', {})
        if df_systemErr is not None and not df_systemErr.empty:
            df_systemErr.to_csv(self.pathName+"\\systemerror.csv")
        
        df_fixedVal = self.loadDataFrame(SVR_DB_NAME, 'd_usedAHist', {})
        if df_fixedVal is not None and not df_fixedVal.empty:
            df_fixedVal.to_csv(self.pathName+"\\fixedvalue.csv")
        
    
    def readSoftSensorResult(self):
        """Export the soft-sensor result collection for the current window
        to softSensor_result.csv (columns sorted)."""
        # store = Arctic(mongo_host=SVR_MONGO_URI)
        symbol = 'qiluCDUsoftsensor'
        # libPHD = store[libName]

        data1 = None
        try:
            # data1 = libPHD.read(symbol, date_range=DateRange(
            #     self.startTime, self.endTime))
            # data2 = libPHD.read(symbol + 'Confidence', date_range=DateRange(
            #     self.startTime, self.endTime))
            # data3 = libPHD.read(symbol + 'A', date_range=DateRange(
            #     self.startTime, self.endTime))
            data1 = self.read_data(symbol, self.startTime, self.endTime)

        except Exception as es:
            print("Exception:", es)

        if data1 is not None and not data1.empty:
            data1.sort_index(axis=1, inplace=True)
            data1.to_csv(self.pathName + "\\softSensor_result.csv", encoding='utf_8_sig')
            print("read data CDU3SoftCensor is:\n", data1.iloc[[0, -1]])
        else:
            print("read data CDU3SoftCensor is None")
        
        # if data2 is not None:
            # data2.sort_index(axis=1, inplace=True)
            # data2.to_csv(self.pathName + "\\softSensor_Confidence_result.csv", encoding='utf_8_sig')
        
        # if data3 is not None:
            # data3.sort_index(axis=1, inplace=True)
            # data3.to_csv(self.pathName + "\\softSensor_A_result.csv", encoding='utf_8_sig')
            # print("read data CDU3SoftCensor is:\n", data3.iloc[[0, -1]])

    def load_vuser2_data2csv(self):
        """Dump each virtual-user-2 collection for the current window to a
        CSV named after the collection.
        NOTE(review): the loop variable ``str`` shadows the builtin."""
        csvfile = self.pathName + "\\{}.csv"
        colls = ['d_smooth_result', 'd_rtsimu_inputs', 'd_rtsimu_result', 'd_rtsimu_out_cvs', 'd_rtsimu_out_mvs',
                 'd_vu2_info', 'd_rtsimu_info']
        dfs = self.load_vuser2_datas(colls, self.startTime, self.endTime, self.client)
        for str, df in zip(colls, dfs):
            if df is not None:
                print("save csvfile:", csvfile.format(str))
                df.to_csv(csvfile.format(str))
    
    
            
    def readSoftSensorOtherResult(self):
        """Dump the soft-sensor model/config collections to CSVs, converting
        their known timestamp columns from UTC to local time."""
        csvfile = self.pathName+"\\{}.csv"
        colls = ['b_softSensorModel', 'b_linearRegB', 'b_ANNX0Y0', 'd_validLIMSSample']
        for colle in colls:
            df = self.loadDataFrame(SVR_DB_NAME, colle, {}, incId=True)
            if df is None or df.empty:
                continue
            # 'enTime' spelling matches the stored field name -- do not "fix".
            if colle == 'b_linearRegB':
                if 'enTime' in df.columns.values:
                    df['enTime'] = pd.to_datetime(df['enTime'].values).tz_localize(timezone.utc).tz_convert(localtz)
                if 'updateTime' in df.columns.values:
                    df['updateTime'] = pd.to_datetime(df['updateTime'].values).tz_localize(timezone.utc).tz_convert(localtz)
            elif colle == 'b_ANNX0Y0':
                if 'enTime' in df.columns.values:
                    df['enTime'] = pd.to_datetime(df['enTime'].values).tz_localize(timezone.utc).tz_convert(localtz)
                if 'updateTime' in df.columns.values:
                    df['updateTime'] = pd.to_datetime(df['updateTime'].values).tz_localize(timezone.utc).tz_convert(localtz)
            elif colle == 'd_validLIMSSample':
                df['samplingTime'] = pd.to_datetime(df['samplingTime'].values).tz_localize(timezone.utc).tz_convert(localtz)
            if df is not None:
                print("save csvfile:", csvfile.format(colle))
                df.to_csv(csvfile.format(colle),encoding='utf_8_sig')
            
            
if __name__ == "__main__":
    # Smoke test: pull the last hour of LIMS records from the server.
    end_ts = pd.to_datetime(datetime.now())
    start_ts = end_ts - timedelta(hours=1)
    accessor = MongoAccess()
    lims = accessor.readLIMS(start_ts, end_ts)
    
    
