import numpy as np
import pandas as pd
from abc import ABC, abstractmethod
import random
from collections import OrderedDict
from typing import Union


class AbstractDataset(ABC):
    """Abstract base for datasets: subclasses load/preprocess in their pipeline."""

    def __init__(self, path: str):
        # Populated by the subclass's _load_dataset_pipeline.
        self.data = None
        self.target = None
        self._load_dataset_pipeline(path)

    @abstractmethod
    def _load_dataset_pipeline(self, path: str) -> None:
        """
        Load and preprocess the dataset; always called on initialization.
        Implementations must read the data at *path* and fill in
        ``self.data`` and ``self.target``.
        @param path: dataset path
        @return: None
        """

    @abstractmethod
    def convert_new_data(self, new_data: Union[list, np.ndarray]) -> np.ndarray:
        """
        Convert new samples into the dataset's encoded format.
        @param new_data: new samples
        @return: converted data
        """

    def get_train_test_set(self, train_ratio: float, random_state: int = 42) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """
        Shuffle the dataset and split it into train and test partitions.
        @param train_ratio: fraction of samples assigned to the training set
        @param random_state: RNG seed (split is deterministic for a fixed seed)
        @return: train data, test data, train targets, test targets
        @raises ValueError: if data and target lengths differ
        """
        if len(self.data) != len(self.target):
            # Raise instead of assert: assertions disappear under ``python -O``.
            raise ValueError('数据长度和标签长度应相同')

        # A private RNG avoids clobbering the global ``random`` state for
        # callers; seeded the same way it shuffles identically to
        # ``random.seed`` + ``random.shuffle``.
        rng = random.Random(random_state)

        # Keep each sample paired with its label while shuffling.
        full_set = list(zip(self.data, self.target))
        rng.shuffle(full_set)

        data = np.array([sample for sample, _ in full_set])
        target = np.array([label for _, label in full_set])

        train_count = int(len(self.data) * train_ratio)

        train_data, train_target = data[:train_count], target[:train_count]
        test_data, test_target = data[train_count:], target[train_count:]

        return train_data, test_data, train_target, test_target


class WatermelonDataset(AbstractDataset):
    """Watermelon dataset read from a tab-separated text file.

    Numeric-looking string features are converted to floats; all other
    strings are label-encoded to integers, with the per-column mappings
    kept in ``self.str_map`` so new samples can be encoded consistently.
    """

    def __init__(self, path):
        # Ordered {column -> {raw string -> int code}} so convert_new_data
        # can align positional features with dataset columns.
        self.str_map = OrderedDict()
        super().__init__(path)

    def _load_dataset_pipeline(self, path) -> None:
        """
        Dataset loading pipeline: read file, encode values, split data/target.
        @param path: dataset path
        @return: None
        """
        dataset = self._load_dataset(path)
        dataset = self._convert_and_encode(dataset)
        dataset = self._convert_to_array(dataset)
        self.data, self.target = self._split_data_and_target(dataset)
        print('数据集加载成功！')

    @staticmethod
    def _load_dataset(path: str) -> pd.DataFrame:
        """
        Read the tab-separated dataset file into a DataFrame.
        The first row provides the column names and the first column
        becomes the index.
        @param path: dataset path
        @return: raw (all-string) DataFrame
        """
        with open(path, 'r', encoding='utf-8') as f:
            # splitlines() also drops '\r' from Windows-style line endings,
            # which the previous replace('\n', '') left behind.
            lines = f.read().splitlines()

        rows = [line.split('\t') for line in lines]

        df = pd.DataFrame(rows[1:], columns=rows[0])
        df = df.set_index(rows[0][0])

        return df

    def _convert_and_encode(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Convert numeric-looking strings to floats; label-encode the rest.
        Encodings are recorded in ``self.str_map`` for reuse on new data.
        @param df: raw DataFrame
        @return: encoded DataFrame (modified in place and returned)
        """
        for col in df.columns:
            # Reuse any existing mapping for this column.
            str_to_int = self.str_map.setdefault(col, {})

            for idx in df.index:
                cell = df.loc[idx, col]
                try:
                    # Numeric strings become floats.
                    df.loc[idx, col] = float(cell)
                except ValueError:
                    if cell not in str_to_int:
                        # Derive the next code from the mapping's size so a
                        # repeated call never reissues an already-used code
                        # (the original restarted its counter at 0).
                        str_to_int[cell] = len(str_to_int)
                    # Replace the raw string with its integer code.
                    df.loc[idx, col] = str_to_int[cell]
        return df

    @staticmethod
    def _convert_to_array(df: pd.DataFrame) -> np.ndarray:
        """
        Convert the encoded DataFrame into a plain array.
        @param df: encoded DataFrame
        @return: underlying values array
        """
        return df.values

    @staticmethod
    def _split_data_and_target(dataset: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
        """
        Split the array into feature columns and the final target column.
        @param dataset: full dataset array
        @return: (features, targets) — targets keep a trailing axis of size 1
        """
        return dataset[:, :-1], dataset[:, -1:]

    def convert_new_data(self, new_data: Union[list, np.ndarray]) -> np.ndarray:
        """
        Encode new samples using the mappings learned while loading.
        Categories unseen during loading get a fresh code equal to the size
        of that column's mapping.
        @param new_data: iterable of samples, each a sequence of raw features
        @return: object-dtype array of encoded samples
        """
        converted = []
        for data_point in new_data:
            row = []
            for col_idx, col_name in enumerate(self.str_map):
                if col_idx == len(data_point):
                    # Sample has fewer features than the dataset has columns
                    # (e.g. the label column is absent) — stop here.
                    break
                try:
                    value = float(data_point[col_idx])
                except (TypeError, ValueError):
                    # Narrowed from a bare ``except``, which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    value = self.str_map[col_name].get(
                        data_point[col_idx], len(self.str_map[col_name]))
                row.append(value)
            converted.append(row)

        return np.array(converted, dtype=object)
