import numpy as np
import numpy.random as random
import torch
from torch import optim
from torch.utils.data import DataLoader, TensorDataset

from Model import *

# Alias the project's loss (brought in via ``from Model import *``) and pin
# all tensors/models to the GPU.  NOTE(review): this hard-requires a
# CUDA-capable build of torch — confirm before running on CPU-only hosts.
loss_func = loss_function
dev = torch.device("cuda")


def K_means(K, input, u):
    """Partition the rows of ``input`` into K clusters by their key column.

    Runs Lloyd's algorithm in one dimension: each row's key (``data[0]``)
    is assigned to the nearest centroid in ``u`` (squared distance), then
    each centroid is moved to the mean of its assigned keys, repeating
    until no centroid moves.

    Args:
        K: number of clusters.
        input: 2-D array-like of rows; only column 0 (the key) is used.
        u: length-K array of initial centroids, updated IN PLACE.

    Returns:
        Integer array of length ``input.shape[0]`` with the cluster index
        assigned to each row.
    """
    # Cluster assignment per row.  ``np.int`` / ``np.float`` were removed
    # in NumPy 1.24 — the builtin types yield the same default dtypes.
    c = np.zeros(input.shape[0], dtype=int)
    changed = True
    while changed:
        # Assignment step: nearest centroid by squared distance on the key.
        for i, data in enumerate(input):
            odds = np.zeros(K, dtype=float)
            for j in range(K):
                odds[j] = (data[0] - u[j]) ** 2
            c[i] = np.argmin(odds)
        changed = False
        # Update step: move each centroid to the mean of its members.
        for j in range(K):
            upper = 0.0
            lower = 0.0
            for i, data in enumerate(input):
                if c[i] == j:
                    upper += data[0]
                    lower += 1
            if lower == 0:
                # Empty cluster: original policy resets its centroid to 0.
                u[j] = 0
            else:
                centroid = upper / lower
                if centroid != u[j]:
                    changed = True
                    u[j] = centroid

    return c


def quick_sort(u, cluster, start, end):
    """Sort ``u[start..end]`` ascending in place, applying the exact same
    permutation to ``cluster`` so the two parallel arrays stay aligned.

    Uses the classic "fill the hole" partition with the leftmost element
    as the pivot, then recurses on both halves.
    """
    # Zero or one element in range: already sorted.
    if start >= end:
        return
    lo, hi = start, end
    # Lift out the leftmost pair as the pivot; its slot becomes the hole.
    pivot = u[lo]
    pivot_tag = cluster[lo]
    while lo < hi:
        # Scan right-to-left for a key below the pivot and drop it into
        # the hole on the left side.
        while lo < hi and u[hi] >= pivot:
            hi -= 1
        u[lo] = u[hi]
        cluster[lo] = cluster[hi]
        # Scan left-to-right for a key at or above the pivot and drop it
        # into the hole on the right side.
        while lo < hi and u[lo] < pivot:
            lo += 1
        u[hi] = u[lo]
        cluster[hi] = cluster[lo]
    # lo == hi: the pivot pair lands in the remaining hole.
    u[lo] = pivot
    cluster[lo] = pivot_tag
    # Recurse on the partitions either side of the pivot.
    quick_sort(u, cluster, start, lo - 1)
    quick_sort(u, cluster, lo + 1, end)


def train_one_model(index, data, append=False):
    """Train (or fine-tune) the linear model for cluster ``index``.

    Args:
        index: cluster number; also selects the on-disk checkpoint file
            ``model{index}.pkl``.
        data: iterable of (key, value) pairs belonging to this cluster.
        append: when True, resume from the saved checkpoint and run fewer
            epochs (used when fine-tuning after an insert).

    Side effects: prints every batch loss and saves the trained state
    dict to a hard-coded Windows path.  Requires a CUDA device (module
    level ``dev``); ``LinearModel`` / ``loss_func`` come from Model.
    """
    x = []
    y = []
    for key, val in data:
        x.append(key)
        y.append(val)

    # Shape both tensors as (N, 1) columns and move them to the GPU.
    x = torch.tensor(x, dtype=torch.float)
    y = torch.tensor(y, dtype=torch.float)
    x = x.view(x.shape[0], -1).to(dev)
    y = y.view(y.shape[0], -1).to(dev)

    epochs = 100
    dataset = TensorDataset(x, y)
    model = LinearModel(D_in=1, D_out=1).to(dev)
    if append:
        # Resume from the previous checkpoint; fewer epochs for fine-tuning.
        model.load_state_dict(torch.load(r"D:\PySpace\Learned_Index\models\model{}.pkl".format(index)))
        epochs = 50
    opt = optim.Adam(model.parameters(), lr=0.01)
    train_dl = DataLoader(dataset, batch_size=1000, shuffle=True)
    for epoch in range(epochs):
        model.train()
        for xb, yb in train_dl:
            loss = loss_func(model(xb), yb)
            print(loss)
            # NOTE(review): .sum() suggests loss_function returns a
            # per-element tensor rather than a scalar — confirm in Model.
            loss.sum().backward()
            opt.step()
            opt.zero_grad()
        # model.eval()
    torch.save(model.state_dict(), r"D:\PySpace\Learned_Index\models\model{}.pkl".format(index))

class Classifier(object):
    """1-D K-means classifier that routes keys to per-cluster models.

    Holds ``k`` centroids in ``u`` (kept sorted ascending via ``resort``)
    and, for each cluster, the list of ``[key, value]`` pairs assigned to
    it in ``cluster``.  State can be round-tripped to disk with
    ``write2mem`` / ``read_from_mem``.
    """

    def __init__(self, k):
        self.k = k
        # cluster[i]: list of [key, value] pairs belonging to cluster i.
        self.cluster = np.empty(self.k, dtype=object)
        # u[i]: centroid (mean key) of cluster i.  ``np.float`` was
        # removed in NumPy 1.24 — the builtin float is the same dtype.
        self.u = np.zeros(self.k, dtype=float)

    def read_from_mem(self):
        """Load k, the centroids and all cluster contents from disk.

        Expects the layout produced by ``write2mem``: k, then k centroid
        lines, then for each cluster a count line followed by that many
        "key<TAB>value" lines.
        """
        with open(r"D:\ep\classifier.txt", "r") as f:
            data = f.readlines()
        self.k = int(data[0])
        self.cluster = np.empty(self.k, dtype=object)
        self.u = np.zeros(self.k, dtype=float)
        for i in range(self.k):
            self.cluster[i] = []
            self.u[i] = float(data[i + 1])
        # Skip past the header (k + centroids) to the per-cluster records.
        data = data[self.k + 1:]
        for i in range(self.k):
            length = int(data[0])
            for j in range(length):
                key_value = data[j + 1].split()
                k, v = float(key_value[0]), float(key_value[1])
                self.cluster[i].append([k, v])
            data = data[length + 1:]
        # self.print_info()

    def print_info(self):
        """Debug dump: k, each centroid, then each cluster's size."""
        print(self.k)
        for i in range(self.k):
            print(self.u[i])
        for i in range(self.k):
            print(len(self.cluster[i]))

    def write2mem(self):
        """Persist k, the centroids and all cluster contents to disk
        in the format ``read_from_mem`` expects."""
        with open(r"D:\ep\classifier.txt", "w") as f:
            f.write(str(self.k) + "\n")
            for i in range(self.k):
                f.write(str(self.u[i]) + "\n")
            for i in range(self.k):
                f.write(str(len(self.cluster[i])) + "\n")
                for data in self.cluster[i]:
                    f.write(str(data[0]) + "\t" + str(data[1]) + "\n")

    def init_clusters(self, input):
        """Seed the centroids from random rows of ``input``, run K-means,
        then bucket every [key, value] row into its cluster and sort the
        clusters by centroid."""
        # Random seeding may pick duplicate rows; K_means tolerates the
        # resulting empty clusters (centroid reset to 0).
        indexes = random.randint(0, input.shape[0], size=self.k)
        for i in range(self.k):
            self.u[i] = input[indexes[i]][0]
        dclass = K_means(self.k, input, self.u)
        for i in range(self.k):
            self.cluster[i] = []
        for i, data in enumerate(input):
            self.cluster[dclass[i]].append([data[0], data[1]])
        self.resort()
        print(self.cluster)  # debug output

    def resort(self):
        """Sort centroids ascending, carrying the clusters along."""
        quick_sort(self.u, self.cluster, 0, self.k - 1)

    def get_class(self, key):
        """Return the index of the centroid nearest to ``key``."""
        dis = np.zeros(self.k, dtype=float)
        for i in range(self.k):
            dis[i] = (key - self.u[i]) ** 2
        return np.argmin(dis)

    def get_cluster(self):
        """Return the array of per-cluster [key, value] lists."""
        return self.cluster

    def retrain_classifier(self, index):
        """Recompute cluster ``index``'s centroid as the mean of its keys,
        re-sort, and fine-tune that cluster's model."""
        col = [row[0] for row in self.cluster[index]]
        self.u[index] = float(sum(col)) / len(self.cluster[index])
        self.resort()
        train_one_model(index, self.cluster[index], True)

    def insert_key_value(self, key, val):
        """Insert a [key, val] pair into its nearest cluster and retrain
        that cluster's model; returns the cluster index used."""
        index = self.get_class(key)
        self.cluster[index].append([key, val])
        self.retrain_classifier(index)
        return index