

import copy
from typing import Any, Optional, List, Union
from typing_extensions import Unpack, NotRequired, Required, TypedDict
from pydantic.fields import FieldInfo as PydanticFieldInfo, _FieldInfoInputs
from pydantic_core import PydanticUndefined as Undefined
from tablestore import SecondaryIndexType, PK_AUTO_INCR
from .utils import Utils

import tablestore
class SearchFieldSchema(tablestore.FieldSchema):
    """Simplified search-index (多元索引) field schema used when creating a table.

    Thin wrapper over ``tablestore.FieldSchema`` that supplies convenient
    defaults and always passes ``source_fields=[]``.
    """

    def __init__(self, field_name: str = '', field_type: tablestore.FieldType = None, index=True,
                 store=True, is_array=False, enable_sort_and_agg=False,
                 analyzer: tablestore.AnalyzerType = None,
                 analyzer_parameter: Union[tablestore.SplitAnalyzerParameter, tablestore.FuzzyAnalyzerParameter] = None,
                 sub_field_schemas: List[tablestore.FieldSchema] = None,
                 date_formats: List[str] = None, is_virtual_field=False, vector_options=None):
        """Search-index field parameters.

        https://help.aliyun.com/zh/tablestore/developer-reference/create-search-indexes-by-using-python-sdk?spm=a2c4g.11186623.0.0.5fd2691940Yc2o

        Args:
            field_name (str, optional): Column name; when empty the column's own
                name is used. Defaults to ''.
            field_type (tablestore.FieldType, optional): Field type; basic types
                are auto-detected when omitted. Defaults to None.
            index (bool, optional): Whether indexing is enabled. Defaults to True.
            store (bool, optional): Whether the field value is additionally stored
                inside the search index. Defaults to True.
            is_array (bool, optional): Whether the field is an array. Defaults to False.
            enable_sort_and_agg (bool, optional): Whether sorting and aggregation
                are enabled. Defaults to False.
            analyzer (tablestore.AnalyzerType, optional): Analyzer type; applicable
                when the field type is Text. ``split`` means a custom separator.
                Defaults to None.
            analyzer_parameter (Union[tablestore.SplitAnalyzerParameter, tablestore.FuzzyAnalyzerParameter], optional):
                Analyzer parameters. Defaults to an empty list.
            sub_field_schemas (List[tablestore.FieldSchema], optional): Index types
                of sub-columns in nested documents; required when the field type is
                Nested. Defaults to an empty list.
            date_formats (List[str], optional): Date formats. Defaults to an empty list.
            is_virtual_field (bool, optional): Whether this is a virtual column.
                Defaults to False.
            vector_options (_type_, optional): Attributes for a vector field type.
                Defaults to None.
        """
        # Fix: the previous signature used mutable default arguments ([]), the
        # classic Python pitfall of one shared list across all calls. The public
        # defaults are now None sentinels, normalized here to the fresh empty
        # lists the SDK expects, preserving the original effective behavior.
        if analyzer_parameter is None:
            analyzer_parameter = []
        if sub_field_schemas is None:
            sub_field_schemas = []
        if date_formats is None:
            date_formats = []
        super().__init__(field_name=field_name, field_type=field_type, index=index,
                         store=store, is_array=is_array, enable_sort_and_agg=enable_sort_and_agg,
                         analyzer=analyzer, sub_field_schemas=sub_field_schemas,
                         analyzer_parameter=analyzer_parameter,
                         date_formats=date_formats, is_virtual_field=is_virtual_field,
                         source_fields=[], vector_options=vector_options)

class _column_field_info_base(TypedDict, total=False):
    """Tablestore-specific keyword options accepted by Column()/ColumnInfo."""
    name: str  # column name in the table; '' means "use the model attribute name"
    json_obj: bool  # if True, the stored value is a JSON string deserialized on read
    sec_index_tb_name: str  # secondary-index table name
    sec_index_type: SecondaryIndexType  # secondary-index type (global / local)
    extra_search_scheme: SearchFieldSchema  # extra search-index schema; defaults are used when absent

class _column_field_info( _column_field_info_base, _FieldInfoInputs, total=False):
    """Full keyword set for Column(): tablestore options plus pydantic FieldInfo inputs."""
    pass

class _primary_key_field_info(_column_field_info, total=False):
    """Keyword set for PrimaryKey(): column options plus primary-key specifics."""
    index: Required[int]  # position of this column within the primary key (mandatory)
    autoincrement: bool  # whether this PK column is auto-incremented by the server
    
class ColumnInfo(PydanticFieldInfo):
    """pydantic ``FieldInfo`` extended with Tablestore column metadata.

    Tablestore-specific keywords are consumed here; everything else is
    forwarded unchanged to ``PydanticFieldInfo``.
    """

    def __init__(self, **kwargs: Unpack[_column_field_info]) -> None:
        """Consume Tablestore column options and forward the rest to pydantic.

        Args:
            name (str, optional): column name; '' means "use the model
                attribute name". Defaults to ''.
            json_obj (bool, optional): whether the stored value is a JSON
                string that must be deserialized when a row is read into this
                column. Defaults to False.
            sec_index_tb_name (str, optional): secondary-index table name.
            sec_index_type (SecondaryIndexType, optional): secondary-index
                type (global / local). Defaults to GLOBAL_INDEX.
            extra_search_scheme (SearchFieldSchema, optional): extra
                search-index schema; defaults are used when not given.
        """
        # (keyword, default) pairs consumed by this class before pydantic
        # sees the remaining kwargs.
        extras = (
            ('name', ''),
            ('json_obj', False),
            ('sec_index_tb_name', None),
            ('sec_index_type', SecondaryIndexType.GLOBAL_INDEX),
            ('extra_search_scheme', None),
        )
        for key, default in extras:
            setattr(self, key, kwargs.pop(key, default))
        super().__init__(**kwargs)

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return '{}, name: {}'.format(self.__class__.__name__, self.name)


class PrimaryKeyInfo(ColumnInfo):
    """ColumnInfo for a primary-key column.

    Adds the PK position (``index``) and optional auto-increment support.
    """

    def __init__(self, **kwargs: Unpack[_primary_key_field_info]) -> None:
        """Validate and consume primary-key options, then initialize as a column.

        Args:
            index (int): position of this column within the primary key (required).
            autoincrement (bool, optional): whether the column is a server-side
                auto-increment PK. Defaults to False.

        Raises:
            ValueError: when ``index`` is missing, or when an auto-increment
                column would be the first primary key of a global secondary index.
        """
        self.index = kwargs.pop('index', None)
        if self.index is None:  # identity test, not ==, for the None sentinel
            raise ValueError('PrimaryKey must set index')
        # Fix: pop (not get), so 'autoincrement' does not leak into pydantic's
        # FieldInfo kwargs, where it is not a recognized input.
        self.autoincrement = kwargs.pop('autoincrement', False)
        sec_index_type = kwargs.get('sec_index_type', None)
        sec_index_tb_name = kwargs.get('sec_index_tb_name', None)
        # NOTE(review): only an explicitly passed sec_index_type triggers this
        # check, even though ColumnInfo later defaults it to GLOBAL_INDEX —
        # kept as-is to preserve the original validation behavior.
        if self.autoincrement and sec_index_tb_name is not None and sec_index_type == SecondaryIndexType.GLOBAL_INDEX:
            raise ValueError('自增列不能作为全局二级索引的首个主键。请检查autoincrement和sec_index_type的配置。')
        if self.autoincrement:
            # Auto-increment PKs are filled by the server, so the model field
            # must be optional and carry no client-side value.
            kwargs['default'] = None
            kwargs['annotation'] = Optional[int]

        super().__init__(**kwargs)
        
def Column(**kwargs: Unpack[_column_field_info]) -> Any:
    """Declare a regular Tablestore column on a model field.

    Returns:
        Any: a ColumnInfo carrying the given column options.
    """
    info = ColumnInfo(**kwargs)
    return info


def PrimaryKey(**kwargs: Unpack[_primary_key_field_info]) -> Any:
    """Declare a primary-key column on a model field.

    Keyword arguments mirror PrimaryKeyInfo; 'index' is required.
    Annotated with the declared TypedDict for consistency with Column()
    (was a bare ``Any``); runtime behavior is unchanged.

    Returns:
        Any: a PrimaryKeyInfo carrying the given options.
    """
    return PrimaryKeyInfo(**kwargs)

class SecondaryIndexScheme():
    """Placeholder for secondary-index schema configuration (no behavior yet)."""
    pass

# class AutoHash:
#     def __init__(self, target: PrimaryKeyInfo, hash_res_len = 4) -> None:
#         self.target = target
#         self.hash_res_len = hash_res_len
#         self.target_value = 0

#     def value(self, target: Any) -> str:
#         if hasattr(target, self.target.name):
#             value = getattr(target, self.target.name)
#             if value and isinstance(value, (int, float, str)):
#                 self.target_value = value
#         return hash_id(id=self.target_value, len=self.hash_res_len)
    
# class AutoIncrement:
#     pass
    
# class AutoBucket:

#     def __init__(self, target: PrimaryKeyInfo = None, bucket_at_create = True, bucket_num = 16) -> None:
#         """ 表示该列自动分桶

#         Args:
#             target (PrimaryKeyInfo): 用于分桶的目标PK，必须是number类型
#             bucket_at_create (bool, optional): 当target为空时使用时间戳分桶时，用于确认创建实例时就确认分桶还是首次使用时再确认分桶. Defaults to True.
#             bucket_num (int, optional): 分桶数. Defaults to 16.
#         """
#         self.target = target
#         self.bucket_num = bucket_num
#         if target == None and bucket_at_create:
#             self.target_value = time.time()
#         else:
#             self.target_value = 0

#     def value(self, target: Any) -> int:
#         if self.target:
#             if hasattr(target, self.target.name):
#                 value = getattr(target, self.target.name)
#                 if value and isinstance(value, (int, float)):
#                     self.target_value = value
#         elif self.target_value == 0:
#                 self.target_value = time.time()
#         return bucket_id(ms = self.target_value, num_buckets = 16)
    
#     def __copy__(self):
#         # 创建一个对象的浅拷贝
#         # 通常，这会创建一个新对象，但是共享可变对象（如列表）
#         cls = self.__class__
#         new_obj = cls.__new__(cls)
#         new_obj.__dict__.update(self.__dict__)
#         return new_obj

#     def __deepcopy__(self, memo):
#         # 创建一个对象的深拷贝
#         # 这会创建一个新对象，并且递归复制所有可变对象
#         cls = self.__class__
#         new_obj = cls.__new__(cls)
#         memo[id(self)] = new_obj
#         for k, v in self.__dict__.items():
#             setattr(new_obj, k, copy.deepcopy(v, memo))
#         return new_obj
        
def default_updated_ms() -> int:
    """Column default: "updated" timestamp, refreshed to now on every row save.

    Returns:
        int: current timestamp in milliseconds.
    """
    now_ms = Utils.time_ms()
    return now_ms

def default_created_ms(b_create: bool=False) -> Optional[int]:
    """Column default: "created" timestamp, set only on the first save of a row.

    Args:
        b_create: True on the initial save of the row.

    Returns:
        Optional[int]: current timestamp in milliseconds when ``b_create`` is
        True, otherwise None (leaving any existing value untouched).
    """
    return Utils.time_ms() if b_create else None
