import time

import numpy as np
import pandas as pd
import math
from sklearn.model_selection import train_test_split


# DataUtils converts the dataset into the format required by the (logistic
# regression) model: splitting it into training/test sets and cleaning it up.
class DataUtils:
	"""Utility helpers for loading and preparing the abalone dataset.

	Provides loading, missing-value repair, categorical encoding,
	ring-count binning, and train/test splitting. State (features,
	label, test split info) is stored on the class itself.
	"""

	# column names used when reading abalone.csv (bilingual labels)
	names = ['性别(Sex)', '长度(Length)', '直径(Diameter)', '高度(Height)', '整体重量(Whole Weight)',
	         '肉重量(Shucked Weight)',
	         '内脏重量(Viscera Weight)', '壳重量(Shell Weight)', '环(Rings)']
	# total number of samples in abalone.csv
	size = 4177
	# training feature matrix (set by splitData)
	features = None
	# training label vector (set by splitData)
	label = None
	# labels of the most recently generated test split (set by create_dataset)
	test_y = None
	# fraction of the data used for testing (re-randomised by create_dataset)
	test_point = 0.2
	# number of rows in the most recent test split
	test_size = 0

	@staticmethod
	def pretreatment():
		"""Load abalone.csv into a DataFrame using the predefined column names."""
		return pd.read_csv('abalone.csv', names=DataUtils.names)

	@staticmethod
	def compress_data(data):
		"""Bin ring counts into three classes: 1-8 -> 1, 9-11 -> 2, 12-29 -> 3.

		The previous implementation passed `range` objects inside the
		`to_replace` list, which modern pandas does not expand element-wise;
		an explicit value->class mapping is equivalent and version-safe.
		Values outside 1-29 are left unchanged, as with `replace`.
		"""
		mapping = {value: 1 for value in range(1, 9)}
		mapping.update({value: 2 for value in range(9, 12)})
		mapping.update({value: 3 for value in range(12, 30)})
		return data.replace(mapping)

	@staticmethod
	def mend_lack_data(data, drop=False):
		"""Repair missing values.

		If `drop` is True, return a copy with all NaN-bearing rows removed.
		Otherwise, fill NaNs in each numeric column with that column's mean
		and drop rows where a non-numeric column is missing. Mutates `data`
		in place and returns it.
		"""
		if drop:
			return data.dropna()
		# pd.isnull and pd.isna are aliases, so one check suffices
		if data.isnull().values.any():
			for column in data.columns:
				col = data[column]
				if col.isnull().any():
					if col.dtype != 'object':
						# BUG FIX: the old code called data.fillna(col.mean(),
						# inplace=True), filling EVERY column's NaNs with this
						# column's mean; only the current column must be filled.
						data[column] = col.fillna(col.mean())
					else:
						# non-numeric column: no sensible fill value, drop rows
						data.dropna(inplace=True)
		return data

	@staticmethod
	def mend_type_data(data):
		"""Encode the categorical sex values M -> 1, F -> 0, I -> -1 (in place)."""
		for column in data.columns:
			data[column] = data[column].replace(['M', 'F', 'I'], [1, 0, -1])
		return data

	@staticmethod
	def check_lack_data(data):
		"""Return True if the frame contains any missing values."""
		return bool(data.isnull().values.any())

	@staticmethod
	def check_type_data(data):
		"""Return True if any column still holds non-numeric (object) data."""
		return any(data[column].dtype == 'object' for column in data.columns)

	@staticmethod
	def splitData(data):
		"""Split `data` into features/labels and keep a fixed 70% training slice.

		The last column is treated as the label. The 30% test portion produced
		here is intentionally discarded; create_dataset() draws its own random
		test split later from the stored training slice.
		"""
		DataUtils.features = data.iloc[:, :-1]
		DataUtils.label = data.iloc[:, -1]
		train_x, test_x, train_y, test_y = train_test_split(DataUtils.features, DataUtils.label, test_size=0.3,
		                                                    random_state=0)
		DataUtils.features = train_x
		DataUtils.label = train_y
		return DataUtils.features, DataUtils.label

	@staticmethod
	def create_dataset():
		"""Draw a fresh random test split and wrap each row in a DataSet record.

		The test fraction is sampled uniformly from [0.2, 0.4) and the split is
		seeded with the current time, so repeated calls yield different subsets.
		Updates test_point/test_size/test_y and returns the list of DataSet
		objects (one per test row).
		"""
		DataUtils.test_point = np.random.uniform(0.2, 0.4)
		train_x, test_x, train_y, test_y = train_test_split(DataUtils.features, DataUtils.label,
		                                                    test_size=DataUtils.test_point,
		                                                    random_state=int(time.time()))
		DataUtils.test_size = len(test_x)
		DataUtils.test_y = test_y
		# attribute order matches the csv column layout (sex .. shell weight)
		feature_attrs = ('sex', 'length', 'diameter', 'height', 'whole_weight',
		                 'shucked_weight', 'viscera_weight', 'shell_weight')
		data = []
		for row in range(len(test_x)):
			record = DataSet()
			for col, attr in enumerate(feature_attrs):
				setattr(record, attr, test_x.iloc[row, col])
			data.append(record)
		return data


class DataSet:
	"""Plain record holding one abalone sample; every field defaults to None."""
	# categorical feature
	sex = None
	# size measurements
	length = diameter = height = None
	# weight measurements
	whole_weight = shucked_weight = viscera_weight = shell_weight = None
	# target value (ring count)
	rings = None
