import re
import datetime
import time
import os
import api
import config
import logging
import utils
import pandas as pd
import json
# Root-logger format: timestamp - level - message.
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(format=LOG_FORMAT, level=logging.ERROR)

# NOTE(review): the root level above is ERROR, so this INFO record is
# filtered out and the working directory is never actually logged —
# lower the level or use logging.error(...) if this output is wanted.
logging.info(os.getcwd())


def rgb(r: int, g: int, b: int):
    """Pack 8-bit red/green/blue channels into a single 0xRRGGBB integer."""
    return (r * 256 + g) * 256 + b


class WmsBase(api.Base):
    """Base wrapper around one Lark (Feishu) Bitable table.

    Subclasses bind a concrete table id plus a local CSV cache name and
    implement :meth:`get` to fetch and clean that table's records.
    """

    def __init__(self, table_id: str, filetype: str):
        super().__init__(config.LARK_HOST, config.APP_TOKEN)
        # Tenant access token used by every subsequent record request.
        self.access_token = super().get_tenant_access_token(config.APP_ID, config.APP_SECRET)
        self.table_id = table_id
        # Base name (without extension) of this table's CSV cache file.
        self.filetype = filetype

    def read_records(self,
                     view_id: str = None,
                     field_names: str = None,
                     record_filter: str = None):
        """Read every record of the table into one DataFrame.

        Lark returns at most 500 records per request, so the result is
        paged: each response carries ``has_more`` and the ``page_token``
        for the next page; we loop until ``has_more`` is False.

        :param view_id: optional table view to read from.
        :param field_names: optional JSON-array string of field names.
        :param record_filter: optional Lark filter expression.
        :return: DataFrame with each record's ``fields`` dict expanded
            into columns (``fields``/``record_id`` dropped); empty
            DataFrame when the table has no matching records.
        """
        has_more = True    # True while further pages remain
        page_token = None  # None on the very first request
        df = pd.DataFrame()
        page = 0           # 1-based page counter (progress output only)
        while has_more:
            page += 1  # fix: was initialised to 1 and bumped pre-print, so page 1 printed as 2
            resp = self.get_records_list(access_token=self.access_token,
                                         table_id=self.table_id,
                                         view_id=view_id,
                                         page_token=page_token,
                                         field_names=field_names,
                                         record_filter=record_filter)
            has_more = resp["has_more"]
            page_token = resp["page_token"]
            print(f"page {page} token :", page_token)
            if resp['total'] == 0:
                break
            df_new = pd.DataFrame.from_dict(resp["items"])
            # Expand the per-record 'fields' dict into top-level columns,
            # dropping the raw 'fields' column and the 'record_id'.
            df_new = pd.concat([df_new.drop('fields', axis=1),
                                pd.json_normalize(df_new['fields'])], axis=1)
            df_new.drop('record_id', axis=1, inplace=True)
            df = pd.concat([df, df_new])
        return df

    def read_records_updated(self, filedtype: str, field_names: list = None, view_id: str = ""):
        """Read only records changed since the local CSV cache was written.

        Builds a filter on the [最后更新时间] column from the cache
        file's mtime (minus one day of slack); falls back to a full read
        when the cache file does not exist yet.

        :param filedtype: cache-file base name under ``config.DATA_PATH``.
        :param field_names: optional field selection forwarded to
            :meth:`read_records` (default None = all fields; was a
            mutable ``[]`` default — None is accepted by the API, see
            Statement.get which passes it explicitly).
        :param view_id: optional view forwarded to :meth:`read_records`.
        """
        filename = f'{config.DATA_PATH}/{filedtype}.csv'
        try:
            file_mt_date = datetime.date.fromtimestamp(os.path.getmtime(filename))
            update_date = (file_mt_date - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
            record_filter = f'CurrentValue.[最后更新时间]>TODATE("{update_date}")'
        except OSError:  # cache file missing -> full read
            record_filter = None

        return self.read_records(view_id=view_id,
                                 record_filter=record_filter,
                                 field_names=field_names)

    @staticmethod
    def clean_records(df):
        """Shared cleanup: flatten the 'Product Name' option column."""
        if 'Product Name.value_extra.options' in df.columns:
            df['Product Name.value_extra.options'] = df['Product Name.value_extra.options'].astype(
                'str')
            # Pull the human-readable name out of the stringified options dict.
            df.insert(2, 'Product Name', df.pop(
                'Product Name.value_extra.options').str.extract(r"'name': '(.*?)'"))
            df.drop(['Product Name.type', 'Product Name.value'],
                    axis=1, inplace=True)
            # Replace literal '\xa0' escape sequences with plain spaces.
            df['Product Name'] = df['Product Name'].str.replace(
                '\\xa0', " ", regex=False)
        return df

    def get(self, update=True):
        """Fetch and clean this table's records; overridden by subclasses.

        fix: the original signature lacked ``self``, so ``self.get(...)``
        from :meth:`update` would have raised TypeError if a subclass
        did not override this method.
        """
        pass

    def update(self, update: bool = True):
        """Fetch records via :meth:`get` and write/merge the CSV cache.

        :param update: True merges freshly-fetched rows into the existing
            CSV (new rows win on duplicate 'id'); False overwrites the
            CSV with a full fresh dump.
        """
        print(f'updating {self.filetype}')
        df = self.get(update=update)
        filename = f'{config.DATA_PATH}/{self.filetype}.csv'

        if update:
            df_old = pd.read_csv(filename)
            df_new = pd.concat([df, df_old], sort=False)
            # fix: the original compared `.all()` reductions of the two column
            # Indexes (two scalars), which did not verify the schemas at all.
            assert set(df_old.columns) == set(df_new.columns), "columns not match"
            # New records were concatenated first, so they win the dedupe.
            df_new.drop_duplicates(subset=['id'], inplace=True)
            df_new.to_csv(filename, index=False)
        else:
            df.to_csv(filename, index=False)


class Spec(WmsBase):
    """Product specification table ('product_info')."""

    def __init__(self):
        super().__init__(config.PRODUCT_INFO_TABLE_ID, 'product_info')

    def get(self, update=True):
        """Fetch spec records; incremental read when *update* is True."""
        field_names = '["Product Code","Product Name","Type","Item","Target","Method"]'

        if update:
            df = self.read_records_updated(view_id=None,
                                           filedtype='product_info',
                                           field_names=field_names)
        else:
            df = self.read_records(view_id=None, field_names=field_names)

        return self.clean_records(df)

    @staticmethod
    def clean_records(df):
        """Unwrap the quoted 'Method' text and fix the column order."""
        df = WmsBase.clean_records(df)
        if 'Method' in df.columns:
            # Method arrives as a stringified list; keep the quoted body.
            df['Method'] = df['Method'].astype('str').str.extract(r"'(.*)'")
            wanted = ["Product Code", "Product Name", "Type",
                      "Item", "Target", "Method", 'id']
            df = df.loc[:, wanted]
        return df

class Statement(WmsBase):
    """Product statement table ('statement')."""

    def __init__(self):
        super().__init__(config.PRODUCT_STATEMENT_TABLE_ID, 'statement')

    def get(self, update=True):
        """Fetch statement records; incremental read when *update* is True."""
        if update:
            df = self.read_records_updated(view_id=None,
                                           filedtype=self.filetype,
                                           field_names=None)
        else:
            df = self.read_records(view_id=None, field_names=None)

        return self.clean_records(df)

    @staticmethod
    def clean_records(df):
        """Move the product columns to the front and drop 'Index'."""
        df = WmsBase.clean_records(df)
        if not df.empty:
            df.insert(0, 'product code', df.pop('Product Code'))
            df.insert(1, 'product name', df.pop('Product Name'))
            df.drop('Index', axis=1, inplace=True)
        return df

class Coa(WmsBase):
    """COA-target view of the product-info table ('coa target')."""

    def __init__(self):
        super().__init__(config.PRODUCT_INFO_TABLE_ID, 'coa target')
        self.view_id = config.PRODUCT_INFO_COA_VIEW_ID

    def get(self, update=True):
        """Fetch COA target records; incremental read when *update* is True."""
        field_names = '["Product Code", "Product Name","Item","Target","Target (NLT)","Target (NMT)","Method","unit"]'

        if update:
            df = self.read_records_updated(view_id=self.view_id,
                                           filedtype=self.filetype,
                                           field_names=field_names)
        else:
            df = self.read_records(view_id=self.view_id, field_names=field_names)

        return self.clean_records(df)

    @staticmethod
    def clean_records(df):
        """Select and rename the COA columns; unwrap the 'Method' text."""
        df = WmsBase.clean_records(df)
        if not df.empty:
            df = df.copy()
            # Columns we keep, in output order.
            keep = ['Product Code', 'Product Name', 'Item', 'Target',
                    'Target (NLT)', 'Target (NMT)', 'Method',
                    'unit.value', 'id']
            df = df.loc[:, keep]
            df.columns = ['product_code', 'product_name', 'test_items',
                          'target', 'target_nlt', 'target_nmt', 'method', 'unit', 'id']

            # Method arrives bracketed/quoted; keep only the quoted body.
            df['method'] = df['method'].astype('str').str.extract(r"'(.*)'")
        return df


class TestReport(WmsBase):
    """Test report table ('test report')."""

    # Output schema produced by clean_records.
    _OUT_COLUMNS = ['code', 'batch', 'product_name', 'test_items',
                    'test_result', 'target', 'result', 'coa_link',
                    'parent_items', 'unit', 'id']

    def __init__(self):
        super().__init__(config.TEST_REPORT_TABLE_ID, 'test report')
        self.view_id = None

    def get(self, update=True):
        """Fetch test-report records; incremental read when *update* is True."""
        print('update test report')
        field_names = '["Product Code","BKD Batch*","Product Name","Test Items","Test Result","Target","Result(auto)", "COA file","Parent items","Unit"]'
        if update:
            df = self.read_records_updated(view_id=self.view_id,
                                           filedtype=self.filetype,
                                           field_names=field_names)
        else:
            df = self.read_records(view_id=self.view_id, field_names=field_names)
        return self.clean_records(df)

    @staticmethod
    def clean_records(df):
        """Flatten formula columns, then select/rename the report schema."""
        df = WmsBase.clean_records(df)
        if not df.empty:
            df = df.copy()
            # Formula columns arrive as stringified dicts; pull out the text.
            for col in ('Product Code', 'BKD Batch*', 'Target', 'Result(auto)'):
                if col in df.columns:
                    df[col] = df[col].astype('str').str.extract(r"': '(.*?)',")

            # Linked-record column: extract the first linked record id.
            df['Parent items'] = (df['Parent items'].astype('str')
                                  .str.extract(r"ids': \['(.*?)'\]"))
        if df.shape[1] > 10:
            df = df.loc[:, ["Product Code", "BKD Batch*", "Product Name",
                            "Test Items", "Test Result", "Target", "Result(auto)",
                            "COA file.link", "Parent items", "Unit.value", 'id']]
            df.columns = ['code', 'batch', 'product_name', 'test_items',
                          'test_result', 'target', 'result', 'coa_link',
                          'parent_items', 'unit', 'id']
        else:
            # Not enough columns came back: return an empty, well-formed frame.
            df = pd.DataFrame(data=None, columns=TestReport._OUT_COLUMNS)
        return df

if __name__ == "__main__":
    # No CLI behavior yet; the classes above are used by importing this module.
    pass