from unicodedata import name
from minio import Minio
import pyspark
import pyspark.sql.functions as fn
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType,StructField, StringType, IntegerType, DateType, DoubleType, LongType
from delta.tables import DeltaTable
import hashlib
import pandas as pd
from datetime import datetime
import os

import scipy as sp
              
class FileMetaTableSchema(DeltaTable):
    """Schema container for the per-file metadata table.

    Holds three StructTypes:
      * file_decoded_schema    - empty; intended to receive the schema decoded
                                 from an ingested file.
      * file_default_schema    - the fixed columns recorded for every file.
      * file_customized_schema - caller-supplied extra columns.

    NOTE(review): subclasses DeltaTable but never calls super().__init__() and
    uses none of its behavior — presumably it should just be a plain data
    holder; confirm before changing the base class.
    """

    def __init__(self, file_customized_schema: StructType) -> None:
        self.file_decoded_schema = StructType()
        # Fixed bookkeeping columns, identical to chaining StructType().add(...).
        self.file_default_schema = StructType([
            StructField("meta_table_name", StringType(), True),
            StructField("file_name", StringType(), True),
            StructField("file_md5", StringType(), True),
            StructField("file_format", StringType(), True),
            StructField("file_size", LongType(), True),
        ])
        self.file_customized_schema = file_customized_schema
              
class DataSetTableSchema(object):
    """Schema container for the dataset-level metadata table.

    Attributes:
        dataset_default_schema: fixed columns recorded for every dataset.
        dataset_customized_schema: caller-supplied extra columns.
    """

    def __init__(self, dataset_customized_schema: StructType) -> None:
        # BUG FIX: field was misspelled "ower_name"; the intended column is
        # the dataset owner's name.
        self.dataset_default_schema = StructType() \
               .add("owner_name", StringType(), True) \
               .add("dataset_name", StringType(), True) \
               .add("timestamp", DateType(), True) \
               .add("meta_table_name", StringType(), True) \
               .add("file_number", IntegerType(), True) \
               .add("volume_number", LongType(), True)
        self.dataset_customized_schema = dataset_customized_schema
              
                                  
class DataSet(object):
    """Manage one dataset: files live in S3 (MinIO), metadata in Delta Lake.

    Typical usage:
        ds = DataSet(user, name, metatable_schema=DataSetTableSchema(...))
        ds.init(FileMetaTableSchema(...))
        ds.pushFile("/path/to/file.csv")
    """

    def __init__(self, user_name: str, dataset_name: str,
                 metatable_schema: DataSetTableSchema) -> None:
        # Initialize basic dataset state plus the S3 (MinIO) client and the
        # SparkSession used as the Delta Lake engine.
        #
        # BUG FIX: user_name/dataset_name were accepted but never stored;
        # pushFile reads self.dataset_name and previously raised AttributeError.
        self.user_name = user_name
        self.dataset_name = dataset_name
        self.dataset_metatable_schema = metatable_schema

        self.file_metatabel = None          # (sic) Delta table handle, set by pushFile
        self.file_metatabel_schema = None   # set later via init(); pushFile checks it

        self.fs_config = None
        self.db_config = None

        # NOTE(review): hard-coded endpoint and credentials — move these to
        # configuration / environment variables before production use.
        self.bucket_name = 'my-bucket'
        self.minioClient = Minio('172.17.0.2:9000',
                                 access_key='root',
                                 secret_key='password',
                                 secure=False)

        self.delta_spark_engine = SparkSession.builder \
            .master("local[1]") \
            .appName('ml-data-store') \
            .config('spark.sql.extensions', 'io.delta.sql.DeltaSparkSessionExtension') \
            .config('spark.sql.catalog.spark_catalog', 'org.apache.spark.sql.delta.catalog.DeltaCatalog') \
            .getOrCreate()

    def init(self, metatabel_schema: FileMetaTableSchema):
        """Attach the per-file metadata schema; must run before pushFile."""
        self.file_metatabel_schema = metatabel_schema
        print(self.file_metatabel_schema.file_default_schema[1].name)

    def fs_client(self, fs_config):
        # fs_config: the configuration of account info
        # return a local fs client
        pass

    def pushFile(self, file_path: str) -> bool:
        """Upload *file_path* to object storage and record its metadata.

        Returns True on success, False when no file metadata schema has been
        attached via init().
        """
        # BUG FIX: previously a bare `return` (None) despite the -> bool hint;
        # also guard against init() never having been called.
        if self.file_metatabel_schema is None \
                or self.file_metatabel_schema.file_default_schema is None:
            return False

        # Layout: [meta_table_name, file_name, file_md5, file_format, file_size]
        default_schema_df = self.__getDefaultSchema(file_path)
        print(default_schema_df)

        # Upload under the bare file name (index 1 of the default schema list).
        self.minioClient.fput_object(self.bucket_name,
                                     default_schema_df[1],
                                     file_path)

        # BUG FIX: the original referenced self.__delta_spark_engine (a
        # name-mangled attribute that was never assigned) and the non-existent
        # DataFrame attribute `.schemas`; it also issued an unconditional CSV
        # read whose result was discarded.
        decoded_schema_df = None
        file_format = default_schema_df[3]
        if file_format == "csv":
            # gbk encoding: source files are presumably Chinese-encoded CSVs.
            decoded_schema_df = self.delta_spark_engine.read.format("csv") \
                .option("encoding", "gbk") \
                .load(file_path).schema
            print(decoded_schema_df)
        elif file_format in ("json", "parquet", "avro"):
            decoded_schema_df = self.delta_spark_engine.read \
                .format(file_format) \
                .load(file_path).schema
            print(decoded_schema_df)

        # BUG FIX: addColumns expects a StructType, not the schema wrapper
        # object itself.
        self.file_metatabel = DeltaTable.replace(self.delta_spark_engine) \
            .tableName(self.dataset_name) \
            .addColumns(self.file_metatabel_schema.file_default_schema) \
            .execute()

        self.file_metatabel.write.format("delta").save("./delta-table")

        return True

    def pullFile(self, dataset_name):
        # 1. get metatable of the dataset
        # 2. get file_number of the dataset
        # 3. using a loop to implement s3.file_get(file_name)
        pass

    def removeFile(self, file_md5):
        # BUG FIX: there were two removeFile definitions (one taking
        # file_name, one taking file_md5); Python silently keeps only the
        # last, so the file_name variant was dead code and has been removed.
        pass

    def removeDataSet(self, dataset_name):
        pass

    def getMetaInfo(self, dataset_name) -> list:
        pass

    def __getDefaultSchema(self, file_path: str) -> list:
        """Build the default metadata record for *file_path*.

        Returns [meta_table_name, file_name, file_md5, file_format, file_size].
        """
        file_name = file_path.split("/")[-1]
        # BUG FIX: the md5 file handle is now closed even if read() raises.
        with open(file_path, 'rb') as md5file:
            file_md5 = hashlib.md5(md5file.read()).hexdigest()
        file_format = file_path.split(".")[-1]
        file_size = os.path.getsize(file_path)

        return [
            self.dataset_metatable_schema.dataset_default_schema["meta_table_name"].name,
            file_name,
            file_md5,
            file_format,
            file_size,
        ]

    def __gettimestap(self):
        # Return the current POSIX timestamp as a float.
        return datetime.now().timestamp()

    def db_client(self, db_config):
        # db_config: the configuration of DB account info
        # return a database connection client
        pass

    def ingestData(self):
        # 1. ingest data into datastore using spark engine
        # 2. write data into datastore with data lake table format
        pass

    def dataLoader(self):
        # 1. read data into memory using spark/flink/presto/dask/pytorch/
        #    tensorflow engine and so on
        pass


def main() -> None:
    """Smoke test: create a dataset and push one CSV file into it."""
    dataset_schema = DataSetTableSchema(dataset_customized_schema=["team_name"])
    ds = DataSet("binbinm", "face_detection", metatable_schema=dataset_schema)
    # print(ds.getMetaInfo("face_detection"))

    file_path = "./sales_2008-2011.csv"
    file_schema = FileMetaTableSchema(file_customized_schema=["labe_file"])

    ds.init(file_schema)
    ds.pushFile(file_path)


# Guard the demo run so importing this module has no side effects
# (previously these statements executed unconditionally at import time).
if __name__ == "__main__":
    main()

