# -*- coding:utf-8 -*-
#author: mz.mail@foxmail.com
#edition: V1.0

from . import dbmthpkg as db
import pandas as pd
import numpy as np
import traceback
import datetime
import warnings
import time
import re
import os
warnings.filterwarnings("ignore")#忽略警告
pd.set_option('display.unicode.east_asian_width',True)#表格对齐

# Import every [xlsx/csv] file found in the current working directory.
def insertables():
    """Interactively import all .xlsx/.xls/.csv files in the current directory.

    Multi-sheet workbooks are offered sheet by sheet (target table = sheet
    name, upper-cased); single-sheet workbooks and CSV files go into a table
    named after the file with its extension removed.
    """
    try:
        print("开始导入当前目录下文件[xlsx/csv]...") 
        # Build the filtered list up front instead of removing entries from
        # the list while iterating over it.
        filelist=[f for f in os.listdir(os.getcwd())
                  if str(f).lower().endswith(('.xlsx','.xls','.csv'))]
        for file in filelist:
            if str(file).lower().endswith(('.xlsx','.xls')):
                xls=pd.ExcelFile(str(file))
                sheet_names=xls.sheet_names
                if len(sheet_names)>1:
                    # Multi-sheet workbook: one prompt per sheet, each sheet
                    # imported into its own table.
                    for sheet in sheet_names:
                        if db.getinput("是否导入文件["+str(file)+"]工作表["+str(sheet)+"]? y/n:",re.compile(r'([yn]{1})', re.I)).lower()=='y':
                            print("读取文件["+str(file)+"]工作表["+str(sheet)+"]...")
                            df=pd.read_excel("./"+str(file),sheet_name=str(sheet))
                            if len(df)>0:
                                insertsingletable(df,str(sheet).upper())
                            else:
                                print("文件["+str(file)+"]工作表["+str(sheet)+"]无数据,自动跳过!")
                else:
                    if db.getinput("是否导入文件["+str(file)+"]? y/n:",re.compile(r'([yn]{1})', re.I)).lower()=='y':
                        print("读取文件["+str(file)+"]...")
                        df=pd.read_excel("./"+str(file),0)
                        if len(df)>0:
                            # BUG FIX: rstrip(".XLSX") strips a trailing
                            # *character set* ('.','X','L','S'), not a suffix,
                            # so names like "SALES.XLSX" became "SALE".
                            # os.path.splitext removes only the extension.
                            insertsingletable(df,os.path.splitext(str(file))[0].upper())
                        else:
                            print("文件["+str(file)+"]无数据,自动跳过!")
            else:
                if db.getinput("是否导入文件["+str(file)+"]? y/n:",re.compile(r'([yn]{1})', re.I)).lower()=='y':
                    print("读取文件["+str(file)+"]...")
                    df=pd.read_csv("./"+str(file))
                    if len(df)>0:
                        # Same suffix-stripping fix as the Excel branch.
                        insertsingletable(df,os.path.splitext(str(file))[0].upper())
                    else:
                        print("文件["+str(file)+"]无数据,自动跳过!")
        print("导入规则表执行完成!")
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate; report and continue like the original.
        print(str(os.sys.exc_info()[1]))
        print(traceback.format_exc())
        print("插入数据执行失败,请检查后重试!")

# Import a single DataFrame into one database table.
def insertsingletable(df:pd.DataFrame,tabname:str):
    """Interactively load *df* into the database table *tabname*.

    Upper-cases the frame's column names, prompts the user for defaults for
    any table columns the frame lacks, lets the user adjust individual
    columns before loading, optionally clears the target table, and finally
    bulk-inserts all rows via the db helper module.
    """
    try:
        conn=db.makedbconn()
        # Upper-case every incoming column name to match DB conventions.
        # (The unused parallel `cols` list from the original was dropped.)
        renamedict={str(col):str(col).upper() for col in df.columns}
        df.rename(columns=renamedict,inplace=True)
        if db.dbtype==2:
            # DM (达梦) database: NaN placeholders must be sent as None.
            df=pd.DataFrame(df.astype(object));df=df.where(pd.notna(df),None)
            # BUG FIX: the original passed a set {'REFERENCE','REFERENCEDM'}
            # to rename(columns=...); a mapper must be dict-like, so the
            # intended {old: new} dict is used here.
            df.rename(columns={'REFERENCE':'REFERENCEDM'},inplace=True)
        def getsqltabelcols(tab):
            # Zero-row query purely to obtain the table's column list;
            # yields None when the table does not exist.
            _,sqlcols=db.sqlqueryallwithcols(conn,"select * from "+tab+" where 1<>1",False)
            return sqlcols
        tabcols=getsqltabelcols(tabname)
        while tabcols is None:
            tabname=db.getinput("["+tabname+"]表不存在,请重新指定表名称:",re.compile(r'(.*)', re.I)).upper()
            tabcols=getsqltabelcols(tabname)
        def applydefault(col,ipt,allow_totime=False):
            # Apply the user-typed default expression to df[col].
            # Recognised expressions: now(), uuid(), snow(), none() and —
            # when allow_totime is set — totime(); anything else is taken
            # as a literal string value.
            low=str(ipt).lower()
            if low=='now()':
                print("指定列["+str(col)+"]的默认值为: "+str(time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())))
                df[str(col)]=datetime.datetime.now()
            elif low=='uuid()':
                print("指定列["+str(col)+"]的默认值为: 随机数(UUID)")
                df[str(col)]=df.apply(lambda x:db.UUID(),axis=1,result_type='reduce')
            elif low=='snow()':
                print("指定列["+str(col)+"]的默认值为: 随机数(SnowID)")
                df[str(col)]=df.apply(lambda x:db.SnowID(),axis=1,result_type='reduce')
            elif low=='none()':
                print("指定列["+str(col)+"]的默认值为: 空值(None)")
                df[str(col)]=df.apply(lambda x:None,axis=1,result_type='reduce')
            elif allow_totime and low=='totime()':
                print("转换列["+str(col)+"]值为: 时间格式(datetime)")
                df[str(col)]=pd.to_datetime(df[str(col)],errors='ignore')
            else:
                print("指定列["+str(col)+"]的默认值为: "+str(ipt))
                df[str(col)]=str(ipt)
        # Offer stock defaults for well-known batch/time/region columns.
        defcols=set(tabcols).difference(set(df.columns)).intersection({'CITY_ADMDVS','BCHNO','STD_BCHNO','DIAG_BCHNO','OPRN_BCHNO','BCH_NO','RID','CRTE_TIME','UPDT_TIME'})
        if len(defcols)>0:
            if db.getinput("监测到缺少批次号/时间/区域字段"+str(list(defcols))+",是否使用默认值? y/n:",re.compile(r'([yn]{1})', re.I)).lower()=='y':
                for col in defcols:
                    if col in ['CITY_ADMDVS']:
                        df[str(col)]='-1'
                    elif col in ['RID']:
                        df[str(col)]=df.apply(lambda x:db.SnowID(),axis=1,result_type='reduce')
                    elif col in ['CRTE_TIME','UPDT_TIME']:
                        df[str(col)]=datetime.datetime.now()
                    else:
                        df[str(col)]='YB2020'
        # Columns ending in _ID look like unique keys: offer SnowIDs.
        for col in set(tabcols).difference(set(df.columns)):
            if col.endswith('_ID'):
                if db.getinput("["+str(col)+"]可能是数据唯一标签列,建议使用随机数,是否使用随机数? y/n:",re.compile(r'([yn]{1})', re.I)).lower()=='y':
                    print("指定列["+str(col)+"]的默认值为: 随机数(SnowID)")
                    df[str(col)]=df.apply(lambda x:db.SnowID(),axis=1,result_type='reduce')
        # Any still-missing columns get an explicit user-chosen default.
        extracols=set(tabcols).difference(set(df.columns))
        for col in extracols:
            ipt=db.getinput("请指定列["+str(col)+"]的默认值,如需设为当前时间请输入[now()],设为UUID请输入[uuid()],设为SnowID请输入[snow()],设为空值请输入[none()]:",re.compile(r'(.*)', re.I)).strip()
            applydefault(col,ipt)
        # Drop frame columns the table lacks, then order to the table layout.
        df.drop(list(set(df.columns).difference(set(tabcols))),axis=1,inplace=True);df=df[tabcols]
        print("即将导入表["+tabname+"]:")
        db.printdfheadtranspose(df)
        if db.dbtype==2:print("达梦数据库不支持文本形式时间插入,注意将所有时间字段使用totime()转为time格式!")
        confirm=db.getinput("字段内容是否确认无误,无需修改? y/n:",re.compile(r'([yn]{1})', re.I)).lower()
        # Let the user revise individual columns until they confirm.
        while confirm=='n':
            col=db.getinput("请指定需要修改的列名称:",re.compile(r'(.*)', re.I)).strip().upper()
            while col not in df.columns:
                col=db.getinput("列名错误，请重新指定需要修改的列名称:",re.compile(r'(.*)', re.I)).strip().upper()
            ipt=db.getinput("请指定列["+str(col)+"]的默认值,如需设为当前时间请输入[now()],设为UUID请输入[uuid()],设为SnowID请输入[snow()],设为空值请输入[none()],将该列转换为时间格式请输入[totime())]:",re.compile(r'(.*)', re.I)).strip()
            applydefault(col,ipt,allow_totime=True)
            db.printdfheadtranspose(df)
            confirm=db.getinput("字段内容是否确认无误,无需修改? y/n:",re.compile(r'([yn]{1})', re.I)).lower()
        if int(db.sqlqueryone(conn,"select count(*) from "+tabname)[0])>0:
            if db.getinput("["+tabname+"]表内已有数据,是否清空原表, y/n:",re.compile(r'([yn]{1})', re.I)).lower()=='y':
                try:
                    db.sqlexcute(conn,"truncate table "+tabname,False)
                except Exception:
                    # Fall back to DELETE when TRUNCATE is not available.
                    db.sqlexcute(conn,"delete from "+tabname)
        print("开始导入表["+tabname+"]:")
        sql_insert="INSERT INTO "+tabname+" ("+",".join(df.columns)+") VALUES(:"+",:".join(df.columns)+")"
        db.sqlinsertlist(conn,sql_insert,np.array(df).tolist())
        db.disposedbconn(conn)
        print("表["+tabname+"]导入完成!")
    except Exception:
        # Narrowed from a bare except; report and give up on this table.
        print(str(os.sys.exc_info()[1]))
        print(traceback.format_exc())
        print("插入表["+tabname+"]数据执行失败,请检查后重试!")

# Convert Excel table names and column names to their [ALTER_NAME].
def cvtexcels():
    """Rename local Excel files and their columns per a conversion config.

    Reads a user-supplied config workbook with [info_tables, info_columns]
    sheets (validated via check_excel), then rewrites every matching Excel
    file in the current directory under its ALTER_NAME with the mapped
    ALTER_COLUMN_NAMEs.  Source files are deleted — back up beforehand.
    """
    try:
        print("开始EXCEL表结构转换...")
        print("本操作仅执行当前文件夹下EXCEL文件的表名称和列名称转换...")
        print("请提前准备表关系转换配置EXCEL,包含[info_tables,info_columns]两个Sheet页;")
        print("其中:")
        print("[info_tables]包含[TABLE_NAME,ALTER_NAME,VALID_FLAG]列;")
        print("[info_columns]包含[TABLE_NAME,COLUMN_NAME,ALTER_COLUMN_NAME,VALID_FLAG]列;")
        print("[VALID_FLAG]为有效性配置字段,0无效 1有效,无效标记行将不会被识别!")
        print("转换后将自动删除原EXCEL文件,转换前请做好数据备份,将EXCEL名称命名为原表名称并将工作表置于第1个Sheet页!")
        ipt=db.getinput("请输入表结构转换配置EXCEL文件名称,包含扩展名[.xlsx]或[.xls],例如,[info_tables.xlsx]...",re.compile(r'(.*)\.xls([x]{0,1})',re.I))
        info_tables,info_columns=check_excel(ipt)
        while info_tables is None:
            ipt=db.getinput("文件["+str(ipt)+"]内容不符合要求,请重新指定EXCEL文件,包含扩展名[.xlsx]或[.xls],例如,[info_tables.xlsx]...",re.compile(r'(.*)\.xls([x]{0,1})',re.I))
            info_tables,info_columns=check_excel(ipt)
        # Whether columns absent from the new layout should be preserved.
        keep_old_columns=db.getinput("是否保留新表没有的字段?,y/n:",re.compile(r'([yn]{1})',re.I)).lower()=='y'
        print("获取读取表结构转换关系...")
        # One row per (table, column) carrying both old and new names.
        df_info_cvt=pd.merge(info_columns[['TABLE_NAME','COLUMN_NAME','ALTER_COLUMN_NAME','NO_ALTER_COLUMN']],info_tables[['TABLE_NAME','ALTER_NAME','NO_ALTER']],how='left',on=['TABLE_NAME'])
        if keep_old_columns==False:
            # Keep only rows that define a real rename at both levels.
            df_info_cvt=df_info_cvt.loc[(df_info_cvt['NO_ALTER']==False)&(df_info_cvt['NO_ALTER_COLUMN']==False)]
        # TABLE_NAME -> list of target column names (used to project frames).
        new_table_cols=df_info_cvt[['TABLE_NAME','ALTER_COLUMN_NAME']].groupby(['TABLE_NAME'],as_index=False)['ALTER_COLUMN_NAME'].aggregate(list).reset_index().set_index('TABLE_NAME').to_dict(orient='dict')['ALTER_COLUMN_NAME']
        cvtdict=dict();tabledict=dict()
        for index,rows in df_info_cvt.iterrows():
            tabledict[str(rows['TABLE_NAME']).upper()]=str(rows['ALTER_NAME']).upper()
            cvtdict[str(rows['TABLE_NAME']).upper()+"|"+str(rows['COLUMN_NAME']).upper()]=str(rows['ALTER_COLUMN_NAME']).upper()
        for file in os.listdir(os.getcwd()):
            # Only Excel files are conversion candidates.
            if not str(file).lower().endswith(('.xlsx','.xls')):
                continue
            # BUG FIX: str.rstrip(".xlsx") strips a trailing *character set*
            # ('.','x','l','s'), mangling names such as "CLASS.xlsx" ->
            # "CLA"; os.path.splitext removes only the real extension.
            tabname=os.path.splitext(str(file))[0].upper()
            if tabname in tabledict:
                print("转换表: ["+tabname+"]-->["+tabledict[tabname]+"]")
                df_tab=pd.read_excel(os.getcwd()+os.sep+str(file),0)
                for col in list(df_tab):
                    if tabname+"|"+str(col).upper() in cvtdict:
                        df_tab.rename(columns={str(col):cvtdict[tabname+"|"+str(col).upper()]},inplace=True)
                os.remove(str(file))
                if keep_old_columns==False:df_tab=df_tab[new_table_cols[tabname]]
                # Avoid clobbering an existing output: add a SnowID suffix.
                if str(tabledict[tabname]+".xlsx").lower() not in [str(f).lower() for f in os.listdir(os.getcwd())]:
                    df_tab.to_excel(tabledict[tabname]+".xlsx",sheet_name=tabledict[tabname],index=False)
                else:
                    df_tab.to_excel(tabledict[tabname]+"_"+db.SnowID()+".xlsx",sheet_name=tabledict[tabname],index=False)
                print(df_tab.head())
        print("EXCEL表结构转换执行完成!")
    except Exception:
        # Narrowed from a bare except; report and abort the conversion.
        print(str(os.sys.exc_info()[1]))
        print(traceback.format_exc())
        print("EXCEL表结构转换执行失败,请检查后重试!")

# Check whether an Excel conversion-config file meets the requirements.
def check_excel(file):
    """Validate the conversion-config workbook *file*.

    The workbook must contain sheets [info_tables] (columns TABLE_NAME,
    ALTER_NAME, VALID_FLAG) and [info_columns] (columns TABLE_NAME,
    COLUMN_NAME, ALTER_COLUMN_NAME, VALID_FLAG).  Rows with VALID_FLAG != 1
    or an empty key name are dropped; empty ALTER names fall back to the
    original name and are flagged via NO_ALTER / NO_ALTER_COLUMN.

    Returns the cleaned (df_info_tables, df_info_columns) pair, or
    (None, None) when the file is missing sheets/columns or fails to read.
    """
    try:
        # Required columns for each config sheet.
        info_tables_columns=['TABLE_NAME','ALTER_NAME','VALID_FLAG']
        info_columns_columns=['TABLE_NAME','COLUMN_NAME','ALTER_COLUMN_NAME','VALID_FLAG']
        print("检查文件["+str(file)+"]...")
        xls=pd.ExcelFile(str(file)) # open the workbook
        sheet_names=xls.sheet_names # list its sheet names
        if len(sheet_names)>1 and len(set([str(sheet) for sheet in sheet_names]).intersection({'info_tables','info_columns'}))==2:
            # Load both config sheets.
            df_info_tables=pd.read_excel("./"+str(file),sheet_name=str('info_tables'))
            df_info_columns=pd.read_excel("./"+str(file),sheet_name=str('info_columns'))
            # Count how many of the required columns each sheet provides.
            chk_info_tables=len(set(df_info_tables.columns).intersection(set(info_tables_columns)))
            chk_info_columns=len(set(df_info_columns.columns).intersection(set(info_columns_columns)))
            # Proceed only when every required column exists in both sheets.
            if chk_info_tables==3 and chk_info_columns==4:
                # Normalise values and drop inactive rows - info_tables.
                df_info_tables["TABLE_NAME"]=df_info_tables["TABLE_NAME"].fillna('').astype(str)
                df_info_tables["ALTER_NAME"]=df_info_tables["ALTER_NAME"].fillna('').astype(str)
                df_info_tables["VALID_FLAG"]=df_info_tables["VALID_FLAG"].fillna(0).astype(int)
                df_info_tables=df_info_tables.loc[(df_info_tables['VALID_FLAG']==1)&(~(df_info_tables['TABLE_NAME']==''))][info_tables_columns]
                # Normalise values and drop inactive rows - info_columns
                # (also restricted to tables that survived the filter above).
                df_info_columns['TABLE_NAME']=df_info_columns['TABLE_NAME'].fillna('').astype(str)
                df_info_columns['COLUMN_NAME']=df_info_columns['COLUMN_NAME'].fillna('').astype(str)
                df_info_columns['ALTER_COLUMN_NAME']=df_info_columns['ALTER_COLUMN_NAME'].fillna('').astype(str)
                df_info_columns["VALID_FLAG"]=df_info_columns["VALID_FLAG"].fillna(0).astype(int)
                df_info_columns=df_info_columns.loc[(df_info_columns['VALID_FLAG']==1)&(~(df_info_columns['COLUMN_NAME']==''))&(df_info_columns['TABLE_NAME'].isin(df_info_tables["TABLE_NAME"].values.tolist()))][info_columns_columns]
                # Blank ALTER names mean "no rename": flag them and fall
                # back to the original name.
                df_info_tables['NO_ALTER']=df_info_tables.apply(lambda x:True if str(x['ALTER_NAME']).strip()=='' else False,axis=1,result_type='reduce')
                df_info_tables['ALTER_NAME']=df_info_tables.apply(lambda x:x['TABLE_NAME'] if str(x['ALTER_NAME']).strip()=='' else str(x['ALTER_NAME']).strip(),axis=1,result_type='reduce')
                df_info_columns['NO_ALTER_COLUMN']=df_info_columns.apply(lambda x:True if str(x['ALTER_COLUMN_NAME']).strip()=='' else False,axis=1,result_type='reduce')
                df_info_columns['ALTER_COLUMN_NAME']=df_info_columns.apply(lambda x:x['COLUMN_NAME'] if str(x['ALTER_COLUMN_NAME']).strip()=='' else str(x['ALTER_COLUMN_NAME']).strip(),axis=1,result_type='reduce')
                return df_info_tables,df_info_columns
            else:
                # Report exactly which required columns are missing.
                if chk_info_tables!=3:
                    print("[info_tables]缺少: "+",".join(set(info_tables_columns).difference(df_info_tables)))
                if chk_info_columns!=4:
                    print("[info_columns]缺少: "+",".join(set(info_columns_columns).difference(df_info_columns)))
                return None,None
        else:
            # Report which required sheets are missing.
            lack_sheets={'info_tables','info_columns'}.difference(set([str(sheet) for sheet in sheet_names]))
            print("缺少："+",".join(lack_sheets))
            return None,None
    except:
        print(str(os.sys.exc_info()[1]))
        print(traceback.format_exc())
        print("文件读取错误,请检查文件后重试,注意取消文件内的筛选状态!")
        return None,None

