"""
数据解析 python版
"""
import re
import ujson as json
import pytz
import datetime
import pandas as pd
from .auto_extract import create_patterns


class Processor:
    """Base class for DataFrame field processors.

    Subclasses set ``PROCESSOR_NAME`` (the key referenced by transform
    configs, see ``ProcessMap``) and implement ``process(cls, df, **configure)``
    returning the (possibly new) DataFrame.
    """

    PROCESSOR_NAME = ''

    @classmethod  # was a plain method with first arg named `cls`
    def pre_convert(cls, df, pre_conditions):
        """Return True when there are no pre-conditions to satisfy.

        NOTE(review): only the empty case is handled here; any non-empty
        `pre_conditions` currently yields False.
        """
        if not pre_conditions:
            return True
        return False

    @classmethod
    def process(cls, **kwargs):
        # Original body called cls.process() with no args, which recursed
        # forever on the base class. Make the contract explicit instead.
        raise NotImplementedError(f'{cls.__name__} must implement process()')

    @staticmethod
    def pytz(name):
        """Resolve a timezone name, falling back to UTC for unknown names."""
        try:
            return pytz.timezone(name)
        except pytz.exceptions.UnknownTimeZoneError:
            return pytz.timezone('UTC')

    @classmethod
    def tz_ts(cls, date, tz):
        """Adjust `date` to timezone `tz`, then return epoch milliseconds.

        Converts to `tz`, subtracts the UTC offset, and returns the result
        as epoch ms — i.e. the wall-clock time in `tz` re-read as UTC.
        """
        date = date.astimezone(cls.pytz(tz))
        utc_offset = date.utcoffset().total_seconds()
        date = date - datetime.timedelta(seconds=utc_offset)
        return int(date.timestamp() * 1000)


class AddProcessor(Processor):
    """Add a column `field` holding the constant `value` on every row."""

    PROCESSOR_NAME = 'field_add'

    @classmethod
    def process(cls, df, field, value):
        """Create/overwrite `field` with `value` and return the frame."""
        df.loc[:, field] = value
        return df
    

class CopyProcessor(Processor):
    """Duplicate an existing column under a new name."""

    PROCESSOR_NAME = 'field_copy'

    @classmethod
    def process(cls, df, s_field, field):
        """Copy column `s_field` into column `field`."""
        source = df[s_field]
        df.loc[:, field] = source
        return df


class RenameProcessor(Processor):
    """Rename a column."""

    PROCESSOR_NAME = 'field_rename'

    @classmethod
    def process(cls, df, s_field, field):
        """Return a frame with column `s_field` renamed to `field`."""
        return df.rename(columns={s_field: field})


class RemoveProcessor(Processor):
    """Drop one or more columns."""

    PROCESSOR_NAME = 'field_remove'

    @classmethod
    def process(cls, df, fields):
        """Return a frame without the columns listed in `fields`."""
        return df.drop(columns=fields)


class KVProcessor(Processor):
    """Expand a 'k1=v1,k2=v2' style column into one column per key."""

    PROCESSOR_NAME = 'kv'

    @staticmethod  # was a plain function; broke when accessed via an instance
    def _process(value, f_delimiter, v_delimiter):
        """Parse one value into a dict of key/value pairs.

        Uses partition so a value containing `v_delimiter` keeps its tail
        (old split()[1] truncated it) and a pair with no delimiter maps to
        '' instead of raising IndexError.
        """
        result = {}
        for pair in value.split(f_delimiter):
            key, _, val = pair.partition(v_delimiter)
            result[key] = val
        return result

    @classmethod
    def process(cls, df, field, f_delimiter=',', v_delimiter='='):
        """Parse `field` as key/value text and concat the new columns onto df."""
        kv_df = pd.DataFrame(df[field].apply(lambda x: cls._process(x, f_delimiter, v_delimiter)).tolist())
        df = pd.concat([df, kv_df], axis=1)
        return df
    

class JSONProcessor(Processor):
    """Expand a JSON-string column into one column per top-level key."""

    PROCESSOR_NAME = 'json'

    @staticmethod  # was a plain function; broke when accessed via an instance
    def _process(value):
        """Decode one JSON document."""
        return json.loads(value)

    @classmethod
    def process(cls, df, field):
        """Parse `field` as JSON objects and concat the new columns onto df."""
        json_df = pd.DataFrame(df[field].apply(cls._process).tolist())
        df = pd.concat([df, json_df], axis=1)
        return df


class RegexProcessor(Processor):
    """Extract named regex groups from a column into new columns."""

    PROCESSOR_NAME = 'regex'

    @classmethod
    def _process(cls, value, pattern):
        """Return the named-group dict of the first match, or None."""
        match = re.search(pattern, value)
        return match.groupdict() if match else None

    @classmethod
    def process(cls, df, field, pattern):
        """Search `pattern` in `field` and concat the group columns onto df."""
        extracted = df[field].apply(lambda v: cls._process(v, pattern))
        regex_df = pd.DataFrame(extracted.tolist())
        return pd.concat([df, regex_df], axis=1)


class RegexReplaceProcessor(Processor):
    """Regex-substitute values in a column."""

    PROCESSOR_NAME = 'value_regex_replace'

    @classmethod
    def process(cls, df, field, pattern, replace_str):
        """Apply re.sub(pattern, replace_str, ...) to every value in `field`."""
        replaced = df[field].apply(lambda value: re.sub(pattern, replace_str, value))
        df.loc[:, field] = replaced
        return df


class TypeConvertProcessor(Processor):
    """Cast columns to new dtypes; `map` is {field_name: type_name}."""

    PROCESSOR_NAME = 'value_convert_type'

    @classmethod
    def process(cls, df, map):
        """Convert each listed column to its target type.

        'int' goes through to_numeric first so numeric strings such as
        '17484.37' parse before truncation to int.
        """
        for field, type_name in map.items():
            if type_name == 'int':
                df[field] = pd.to_numeric(df[field]).astype(int)
                continue
            # BUG FIX: was `df.astype(type)`, which cast the whole frame
            # and assigned a DataFrame into a single column.
            df[field] = df[field].astype(type_name)
        return df


class TimeProcessor(Processor):
    """Parse datetime strings into timezone-adjusted epoch milliseconds."""

    PROCESSOR_NAME = 'time'

    @classmethod
    def _process(cls, value, format, tz):
        """strptime the value with `format`, then adjust via tz_ts."""
        parsed = datetime.datetime.strptime(value, format)
        return cls.tz_ts(parsed, tz)

    @classmethod
    def process(cls, df, field, format, tz='UTC'):
        """Rewrite `field` in place as epoch ms and return the frame."""
        converted = df[field].apply(lambda raw: cls._process(raw, format, tz))
        df.loc[:, field] = converted
        return df


class TimeStampProcessor(Processor):
    """Normalize epoch timestamps (s/ms/us) to timezone-adjusted epoch ms."""

    PROCESSOR_NAME = 'timestamp'

    @classmethod
    def _process(cls, ts, tz):
        """Scale `ts` down to seconds by magnitude, then adjust via tz_ts."""
        # Magnitude heuristic: very large values are microseconds, large
        # values are milliseconds, anything smaller is taken as seconds.
        if ts > 15766344150000:  # microseconds
            ts = ts / 1000000
        elif ts > 15766344150:  # milliseconds
            ts = ts / 1000

        moment = datetime.datetime.fromtimestamp(float(ts))
        return cls.tz_ts(moment, tz)

    @classmethod
    def process(cls, df, field, tz='UTC'):
        """Rewrite `field` in place as normalized epoch ms and return df."""
        converted = df[field].apply(lambda raw: cls._process(raw, tz))
        df.loc[:, field] = converted
        return df


class LowerProcessor(Processor):
    """Lower-case a string column."""

    PROCESSOR_NAME = 'value_lower'

    @classmethod
    def process(cls, df, field):
        """Lower-case every value in `field` and return the frame."""
        lowered = df[field].str.lower()
        df.loc[:, field] = lowered
        return df


class UpperProcessor(Processor):
    """Upper-case a string column."""

    PROCESSOR_NAME = 'value_upper'

    @classmethod
    def process(cls, df, field):
        """Upper-case every value in `field` and return the frame."""
        uppered = df[field].str.upper()
        df.loc[:, field] = uppered
        return df


class StripProcessor(Processor):
    """Strip leading/trailing characters from string columns."""

    PROCESSOR_NAME = 'value_strip'

    @classmethod
    def process(cls, df, fields, strip_chars: str = None):
        """Strip `strip_chars` (whitespace when None) from each column in `fields`.

        CONSISTENCY FIX: the first parameter of this @classmethod was named
        `self`; renamed to `cls` to match every sibling processor.
        """
        for field in fields:
            df.loc[:, field] = df[field].str.strip(strip_chars)
        return df


class ConcatProcessor(Processor):
    """Join several string columns into one, separated by `char`."""

    PROCESSOR_NAME = 'value_concat'

    @classmethod
    def process(cls, df, field, s_fields, char: str = '_'):
        """Write the per-row join of `s_fields` into `field`."""
        joined = df[s_fields].apply(lambda row: char.join(row), axis=1)
        df.loc[:, field] = joined
        return df


class ValueMapProcessor(Processor):
    """Map column values through a lookup dict, with a fallback default."""

    PROCESSOR_NAME = 'value_map'

    @classmethod
    def process(cls, df, field, map, default: str = None):
        """Replace each value in `field` with map[value], or `default`."""
        lookup = map.get
        df.loc[:, field] = df[field].apply(lambda key: lookup(key, default))
        return df


class ValueSetProcessor(Processor):
    """Overwrite a column with a constant value."""

    PROCESSOR_NAME = 'value_update'

    @classmethod
    def process(cls, df, field, value):
        """Set every row of `field` to `value` and return the frame."""
        df.loc[:, field] = value
        return df


class ValueReplaceProcessor(Processor):
    """Literal substring replacement on a column."""

    PROCESSOR_NAME = 'value_replace'

    @classmethod
    def process(cls, df, field, origin_str, replace_str):
        """Replace `origin_str` with `replace_str` in every value of `field`.

        regex=False pins literal replacement: older pandas defaulted
        str.replace to regex=True, which would misread origin_str values
        containing regex metacharacters. Pattern-based replacement is the
        job of RegexReplaceProcessor ('value_regex_replace').
        """
        df.loc[:, field] = df[field].str.replace(origin_str, replace_str, regex=False)
        return df


class ExtractProcessor(Processor):
    """Auto-extraction: resolve a pattern, then delegate to RegexProcessor."""

    PROCESSOR_NAME = 'value_auto_extract'

    @classmethod
    def extract_pattern(cls, sample, configure, pattern):
        """Resolve the working pattern.

        An explicit `pattern` wins; otherwise derive one from `configure`
        via create_patterns, or return '' when there is nothing to derive.
        """
        if pattern:
            return pattern
        if not configure:
            return ''
        return create_patterns(sample, configure)

    @classmethod
    def process(cls, df, field, pattern=''):
        """Run regex extraction on `field`; no-op when pattern is empty."""
        if pattern:
            return RegexProcessor.process(df, field, pattern)
        return df


# Registry mapping PROCESSOR_NAME -> processor class, auto-collected from
# every direct Processor subclass defined above.
ProcessMap = dict(
    (processor.PROCESSOR_NAME, processor)
    for processor in Processor.__subclasses__()
)


class Parser:
    """Parsing pipeline: filter raw rows, regex-extract fields, apply transforms."""

    # Column holding the raw log line.
    RawField = '_raw'

    @classmethod
    def identifier(cls, df, identifiers):
        """Filter out invalid rows by substring rules.

        Each identifier is ``{'op': 'pass'|<other>, 'subs': [substr, ...]}``:
        'pass' keeps rows whose raw value contains any substring, any other
        op drops them.
        """
        if not identifiers:
            return df

        for _identifier in identifiers:
            op, subs = _identifier['op'], _identifier['subs']

            mask = df[cls.RawField].apply(lambda x: any(item in x for item in subs))
            if op == 'pass':
                df = df[mask]
            else:
                df = df[~mask]

        df = df.reset_index(drop=True)  # re-align the index after filtering
        return df

    @classmethod
    def extract(cls, df, pattern, field='_raw'):
        """Expand named regex groups from `field` into columns.

        Empty pattern is a no-op (consistent with ExtractProcessor.process);
        previously it still ran a pointless re.search('') pass and an
        empty concat over every row.
        """
        if not pattern:
            return df
        return RegexProcessor.process(df, field=field, pattern=pattern)

    @classmethod
    def transform(cls, df, transforms):
        """Apply the configured processors in order; each returns the new df."""
        for _transform in transforms:
            processor = ProcessMap[_transform['name']]
            df = processor.process(df, **_transform['configure'])
        return df

    @classmethod
    def parser(cls, df, extract, transforms):
        """Full pipeline entry: extract (optional pattern) then transform."""
        df = cls.extract(df, pattern=extract.get('pattern', ''))
        df = cls.transform(df, transforms=transforms)
        return df
    

class GroupParser(Parser):
    """按解析组解析"""

    @classmethod
    def _row_pass_rule(cls, row, identifiers):
        """原始数据是否匹配规则"""
        return any([all([(True if any(sub in row for sub in subs) else False) if k == 'pass' 
                         else (False if any(sub in row for sub in subs) else True) for k, subs in identifier.items()]) 
                         for identifier in identifiers])

    @classmethod
    def category_rule(cls, row, groups):
        """解析规则组
        None ==> 默认日志
        """
        for group in groups:
            for rule in group['rules']:
                if identifiers := rule.get('identifiers'):  # 没有限制特征 一律通过
                    if cls._row_pass_rule(row, identifiers):
                        return rule['id']
                else:
                    return rule['id']
        return None
    
    @classmethod
    def identifier_by_rule(cls, df, groups, field='_raw'):
        """group_rules"""
        df.loc[:, '_parserid'] = df[field].apply(lambda row: cls.category_rule(row, groups))

        # drop 数据
        df = df.dropna(subset=['_parserid']).reset_index(drop=True)
        return df
    
    @classmethod
    def process(cls, group_df, configure):

        group_df.loc[:, '_modelid'] = configure.get('model_id')
        return cls.parser(group_df, extract=configure['extract'], transforms=configure['transforms'])

    @classmethod
    def group_parser(cls, df, groups, config_map):
        df = cls.identifier_by_rule(df, groups)
        if df.empty:
            return []
        
        groupd = df.groupby('parser_rule_id')
        return [cls.process(group_df.reset_index(drop=True), config_map[key]) for key, group_df in groupd]


if __name__ == "__main__":
    import pandas as pd
    data = {
        '_raw': ['Alice', 'Bob', 'Charlie', 'David', 'Eve'],
        'B': ['alice@example.com', 'bob@example.com', 'charlie@example.com', 'david@example.com', 'eve@example.com'],
        'C': ['New York', 'Los Angeles', 'Chicago', 'Houston', 'Phoenix'],
        'D': [1, 2, 3, 4, 5],
        'F': ['aa=ff,bb=tt', 'aa=f1f,bb=t1t', 'aa=f2f,bb=t1t', 'aa=f2f,bb=t2t', 'aa=f1f,bb=t1t'],
        'G': ['{"tt": "a"}', '{"tt": "a"}','{"tt": "ac"}','{"tt": "a"}','{"tt": "ab"}'],
        'X': ['2025-05-28T13:00:00','2025-05-28T13:00:00','2025-05-28T13:00:00','2025-05-28T13:00:00','2025-05-28T14:00:00'],
        'Y': [1748437200000, 1748437200000, 1748437200000, 1748437200000, 1748437200000],
        'EE': ['17484.37', '17484.37','17784.37','17483.22','17484']
    }
    _df = pd.DataFrame(data)

    df = Parser.parser(_df, 
                       extract={},
                       transforms=[{'name': 'value_lower', 'configure': {'field': 'B'}},
                                  {'name': 'value_upper', 'configure': {'field': 'C'}},
                                  {'name': 'value_update', 'configure': {'field': 'e', 'value': 1}},
                                  {'name': 'value_replace', 'configure': {'field': 'B', 'origin_str': '@', 'replace_str': '_'}},
                                  {'name': 'value_map', 'configure': {'field': 'e', 'map': {2: 'aa'}, 'default': '1a'}},
                                  {'name': 'value_strip', 'configure': {'fields': ['e'], 'strip_chars': 'a'}},
                                  {'name': 'value_concat', 'configure': {'s_fields': ['e', 'C'], 'field': 'cat'}},
                                  {'name': 'value_regex_replace', 'configure': {'field': 'C', 'pattern': 'O', 'replace_str': '_cba_'}},
                                  {'name': 'value_convert_type', 'configure': {'map': {'EE': 'int'}}},

                                  {'name': 'time', 'configure': {'field': 'X', 'format': '%Y-%m-%dT%H:%M:%S'}},
                                  {'name': 'timestamp', 'configure': {'field': 'Y'}},

                                  {'name': 'kv', 'configure': {'field': 'F'}},
                                  {'name': 'regex', 'configure': {'field': 'B', 'pattern': '(?P<name>[a-z]+)_.*com'}},
                                  {'name': 'json', 'configure': {'field': 'G'}},

                                  {'name': 'field_add', 'configure': {'field': 'H', 'value': 'add'}},
                                  {'name': 'field_copy', 'configure': {'field': 'I', 's_field': 'H'}},
                                  {'name': 'field_rename', 'configure': {'field': 'i', 's_field': 'I'}},
                                  {'name': 'field_remove', 'configure': {'fields': ['i']}}

                                  ])

    print(df)
