import abc
import json
import os
import random
import zipfile
from tqdm import tqdm


class BaseDataset(abc.ABC):
    """Base class for synthetic CSV dataset generators.

    Creates an output directory ``<path>/<name>``, tracks the CSV files
    produced by :meth:`run` in ``compress_li``, and can bundle them into a
    zip archive plus a JSON metadata file via :meth:`export`.
    """

    def __init__(self, name, path, samples_num, sum_rows):
        """
        :param name: dataset name; also used for the output directory and zip file.
        :param path: parent directory for all generated artifacts.
        :param samples_num: number of samples per member file.
        :param sum_rows: total row count across all files (drives the progress bar).
        """
        self.name = name
        self.path = path
        self.sum_rows = sum_rows

        self.samples_num = samples_num

        self.dirname = os.path.join(self.path, name)
        # exist_ok avoids the race between an existence check and creation
        os.makedirs(self.dirname, exist_ok=True)

        self.zip_full_path = os.path.join(self.dirname, f"{self.name}.zip")

        self.data_info_path = os.path.join(self.dirname, "info.json")

        self.pbar = tqdm(total=self.sum_rows, desc=f"[{self.name}]")

        # CSV paths produced by run(); consumed (and optionally removed) by export()
        self.compress_li = []

        # metadata written to info.json; subclasses add mode/member details
        self.desc = {
            "name": self.name,
            "samples_num": self.samples_num
        }

    @staticmethod
    def custom_func(desc):
        """Generate one random value from a dash-separated spec string.

        Supported specs:
          * ``int-<lo>-<hi>``              -> random integer in [lo, hi]
          * ``float-<lo>-<hi>[-<digits>]`` -> random float rounded to ``digits``
                                              decimal places (default 4)
          * ``str-<a>-<b>-...``            -> one of the listed choices

        NOTE: '-' is the separator, so negative bounds cannot be expressed.

        :raises ValueError: if the type tag is not one of int/float/str.
        """
        parts = desc.split('-')
        kind = parts[0]
        # Explicit casts instead of eval(): eval on a spec string is unsafe,
        # and an unknown tag used to raise NameError rather than ValueError.
        if kind == 'int':
            return random.randint(int(parts[1]), int(parts[2]))
        elif kind == 'float':
            digits = int(parts[3]) if len(parts) == 4 else 4
            value = random.uniform(float(parts[1]), float(parts[2]))
            return round(value, digits)
        elif kind == 'str':
            return random.choice(parts[1:])
        else:
            raise ValueError("invalid")

    @staticmethod
    def get_randint(x, y):
        """Random integer in [int(x), int(y)]; accepts numeric strings."""
        return random.randint(int(x), int(y))

    @staticmethod
    def get_random(x: int):
        """Random float in [0, x), rounded to 4 decimal places."""
        return round(random.random() * x, 4)

    @abc.abstractmethod
    def run(self):
        """Generate the member CSV files; implemented by subclasses."""

    def export(self, clear=True):
        """Zip all generated CSVs and write the info.json metadata file.

        :param clear: when True, delete each CSV after adding it to the zip.
        """
        # Context manager guarantees the archive is finalized even on error
        # (the original never closed it on an exception path).
        with zipfile.ZipFile(self.zip_full_path, 'w') as z:
            for d in self.compress_li:
                z.write(d, os.path.basename(d))
                if clear:
                    os.remove(d)

        with open(self.data_info_path, "w", encoding='utf-8') as f:
            f.write(json.dumps(self.desc, indent=4))

class VertDataset(BaseDataset):
    """Vertically partitioned dataset: each member holds different feature
    columns for (partially) shared sample ids; only the promoter gets a label.
    """

    def __init__(self, name, path, members, samples_num=500, samples_radio=1.0):
        """
        :param members: list of dicts with keys 'role', 'feature_count',
            'feature_types' (specs understood by custom_func()).
        :param samples_num: rows generated per member file.
        :param samples_radio: intersection ratio ("ratio"; parameter name kept
            for backward compatibility) — fraction of rows whose ids are
            shared across all members.
        """
        self.members = members
        self.samples_radio = samples_radio

        # Global offsets so feature names and non-overlap ids stay unique
        # across the member files.
        self.current_feature_idx = 0
        self.current_sample_idx = 0

        # Number of rows whose ids are identical in every member file.
        self.same_num = int(samples_num * samples_radio)

        sum_rows = samples_num * len(members)
        super(VertDataset, self).__init__(name, path, samples_num, sum_rows)

        self.desc.update({"members_num": len(members), "mode": "纵向"})
        self.set_desc()

    def set_desc(self):
        """Fill the human-readable description from member feature metadata."""
        feature_count_li = [m['feature_count'] for m in self.members]
        feature_types_li = []
        for m in self.members:
            for t in m['feature_types']:
                ft = t.split('-')[0]
                if ft not in feature_types_li:
                    feature_types_li.append(ft)
        self.desc.update(
            {"desc": f"特征维度：{feature_count_li}, 特征类型：{feature_types_li}，交集比率：{self.samples_radio}"})

    def _write_row(self, fw, row_id, with_target, feature_num, feature_types):
        """Write one CSV row: id [, binary label], then feature_num values."""
        row = [str(row_id)]
        if with_target:
            row.append(str(random.randint(0, 1)))
        for i in range(feature_num):
            if i < len(feature_types):
                row.append(str(self.custom_func(feature_types[i])))
            else:
                # No spec for this column: random float in [0, 10).
                row.append(str(self.get_random(10)))
        fw.write(','.join(row) + '\n')
        self.pbar.update(1)

    def run(self):
        """Generate one CSV per member and record it for later export()."""
        for mem_id, m in enumerate(self.members):
            # Only the promoter's file carries the label column 'y'.
            target = m['role'] == 'promoter'
            dataset_name = os.path.join(self.dirname, f"{self.name}-{m['role']}-{mem_id}.csv")

            self.compress_li.append(dataset_name)
            feature_num = m['feature_count']
            feature_types = m['feature_types']
            headers = ['id'] + (['y'] if target else []) \
                + [f'x_{i + self.current_feature_idx}' for i in range(feature_num)]

            # 'with' guarantees the file closes even if a feature spec is bad
            # (the original leaked the handle on exceptions).
            with open(dataset_name, 'w', encoding='utf8') as fw:
                fw.write(','.join(headers) + '\n')

                # Intersection rows: ids 1..same_num, identical for all members.
                for idx in range(self.same_num):
                    self._write_row(fw, idx + 1, target, feature_num, feature_types)

                # Non-overlapping rows: ids offset per member so they never collide.
                remainder = self.samples_num - self.same_num
                if remainder > 0:
                    for idx in range(remainder):
                        row_id = self.same_num + self.current_sample_idx + idx + 1
                        self._write_row(fw, row_id, target, feature_num, feature_types)
                    self.current_sample_idx += remainder

            self.current_feature_idx += feature_num
        self.pbar.close()


class HorzDataset(BaseDataset):
    """Horizontally partitioned dataset: every member file has the same
    columns (id, y, x_0..x_{n-1}) but its own block of rows.
    """

    def __init__(self, name, path, members_num, feature_count, feature_types, samples_num=500):
        """
        :param members_num: number of member CSV files to generate.
        :param feature_count: feature columns per file.
        :param feature_types: specs understood by custom_func(); columns
            beyond len(feature_types) fall back to a random float in [0, 10).
        :param samples_num: rows per member file.
        """
        self.members_num = members_num
        self.feature_count = feature_count
        self.feature_types = feature_types
        sum_rows = samples_num * members_num
        super(HorzDataset, self).__init__(name, path, samples_num, sum_rows)

        # Every member shares the identical header row.
        self.headers = ['id', 'y'] + [f'x_{i}' for i in range(feature_count)]
        self.desc.update({"members_num": members_num, "mode": "横向"})
        self.set_desc()

    def set_desc(self):
        """Fill the human-readable description from the feature type specs."""
        feature_types_li = []
        for t in self.feature_types:
            ft = t.split('-')[0]
            if ft not in feature_types_li:
                feature_types_li.append(ft)
        self.desc.update(
            {"desc": f"特征维度：{self.feature_count}, 特征类型：{feature_types_li}"})

    def run(self):
        """Generate one CSV per member and record it for later export()."""
        for mem_id in range(self.members_num):
            dataset_name = os.path.join(self.dirname, f"{self.name}-y-{mem_id}.csv")
            self.compress_li.append(dataset_name)

            # 'with' fixes the original's leak: fw was opened but never closed.
            with open(dataset_name, 'w', encoding='utf8') as fw:
                fw.write(','.join(self.headers) + '\n')

                for idx in range(self.samples_num):
                    # id column, then a random binary label.
                    row = [str(idx + 1), str(random.randint(0, 1))]
                    for i in range(self.feature_count):
                        if i < len(self.feature_types):
                            row.append(str(self.custom_func(self.feature_types[i])))
                        else:
                            row.append(str(self.get_random(10)))
                    fw.write(','.join(row) + '\n')
                    self.pbar.update(1)

        self.pbar.close()


if __name__ == '__main__':
    path1 = r"/root/ark/datasets"

    # Vertical demo: 3 members (1 promoter + 2 providers), 400 samples each,
    # 80% of ids shared across members.
    members_li = [
        {"feature_count": 12, "role": "promoter",
         "feature_types": ["int-1-2", "float-0.11-2.22"]},
        {"feature_count": 7, "role": "provider",
         "feature_types": ["int-5-12", "float-3.7-10.4"]},
        {"feature_count": 9, "role": "provider",
         "feature_types": ["int-14-54", "float-55.9-987.9"]}
    ]
    vd = VertDataset("feature", path1, members_li, 400, 0.8)
    vd.run()
    # vd.export()
    print(vd.desc)
    # NOTE(review): exit(34) aborts the script here, so the horizontal demo
    # below never executes — presumably a leftover debug short-circuit; confirm
    # and remove if the HorzDataset run is meant to happen.
    exit(34)

    # Unreachable while exit(34) above remains in place.
    hd = HorzDataset("horz-feature3", path1, 2, 23, ["int-1-2", "float-0.2-0.9", "str-high-low"], 10 ** 4)
    hd.run()
    # hd.export()
    # print(hd.desc)
