import  torch
from    torch import nn, optim, autograd
import  numpy as np
import pandas as pd
import os
import datetime
from copy import deepcopy
from torch.nn import functional as F
from    matplotlib import pyplot as plt
import  random
from sklearn.preprocessing import MinMaxScaler
from sklearn import manifold
import xgboost_am
import xgboost_dmax
import fig
import time

def load_data(X, path):
    data = pd.read_csv(path, delimiter=r",")
    values = data.values[:, 0:1].ravel()
    for data_idx in range(len(values)):
        alloy = []
        alpha_st, num_st = -1, -1
        value = values[data_idx]
        leng = len(value)
        for i in range(leng):
            if value[i].isalpha() :
                if alpha_st == -1:
                    alpha_st = i
                if num_st != -1 :
                    a = value[num_st:i]
                    alloy.append(float(a))
                    num_st = -1
                if i == leng-1:
                    alloy.append(value[num_st:leng])
            else:
                if num_st == -1:
                    num_st = i
                if alpha_st != -1:
                    a = value[alpha_st:i]
                    alloy.append(a)
                    alpha_st = -1

        alloy.append(float(value[num_st:leng]))
        node = np.zeros((100, 1))
        used_idx = []

        name = alloy[0: -1: 2]
        name = ''.join(name)
        alloy_system_list[name] = 1.0

        #only use ternary as training data
        # if len(alloy) > 6:
        #     continue

        for i in range(0, len(alloy), 2):
            idx = elements.get(alloy[i], len(elements))
            if idx == len(elements):
                elements[alloy[i]] = idx
            used_idx.append(idx)
            if only_alloy_system:
                node[idx] = 1.0 if alloy[i + 1] > 0.0 else 0.0
            else:
                node[idx] = alloy[i+1]

        X.append(node.ravel())
    return X

def generate_dataset():
    """Load all alloy CSVs, min-max scale the features, and return the matrix.

    Returns an (n_samples, len(elements)) array: only the columns of elements
    actually registered in the global ``elements`` dict are kept.

    NOTE(review): "1877-am.csv" is loaded explicitly first and, unless
    ``only_1888`` is set, is likely loaded a second time by the directory scan
    below if it lives inside DATA_PATH — confirm whether the duplication is
    intentional.
    """
    X = []
    X = load_data(X, DATA_PATH + "1877-am.csv")
    if not only_1888:
        file_name = os.listdir(DATA_PATH)
        file_name.sort()
        for i in range(len(file_name)):
    #        print(file_name[i])
            X = load_data(X, DATA_PATH+file_name[i])
            # print('loaded data samples')
            # print(len(X))

    # per-feature min-max scaling to [0, 1] across the whole dataset
    scaler = MinMaxScaler(feature_range=(0, 1))
    X = scaler.fit_transform(X)
    # X = np.array(X) / 100.0
    print('loaded data samples')
    print(X.shape)
    print(len(alloy_system_list))
    print(alloy_system_list)
    return X[:, 0:len(elements)]

def split_test_alloy_system(X, test_alloy_system):
    """Split X into train/test sets by alloy system membership.

    A row belongs to the test set when its non-zero entries are exactly the
    elements of ``test_alloy_system`` (indices looked up in the global
    ``elements`` dict); every other row goes to the training set.

    :param X: iterable of composition vectors
    :param test_alloy_system: list of element symbols defining the held-out system
    :return: (x_train, x_test) as numpy arrays
    """
    system_idx = [elements[e] for e in test_alloy_system]

    x_train, x_test = [], []
    for row in X:
        is_target = (np.count_nonzero(row) == len(system_idx)
                     and all(row[i] != 0 for i in system_idx))
        (x_test if is_target else x_train).append(row)
    return np.array(x_train), np.array(x_test)

class Generator(nn.Module):
    """MLP generator: latent vector z (size nz) -> composition vector in [0, 1]^nz."""

    def __init__(self):
        super(Generator, self).__init__()

        # (in_features, out_features, apply batch-norm?)
        specs = [(nz, 512, False), (512, 1024, True), (1024, 512, True)]
        layers = []
        for in_feat, out_feat, use_bn in specs:
            layers.append(nn.Linear(in_feat, out_feat))
            if use_bn:
                # NOTE(review): 0.8 is passed positionally as `eps`, not
                # `momentum` — looks like a copy-paste slip; confirm intent.
                layers.append(nn.BatchNorm1d(out_feat, 0.8))
            layers.append(nn.LeakyReLU(0.02, inplace=True))
        layers.append(nn.Linear(512, nz))
        layers.append(nn.Sigmoid())
        self.net = nn.Sequential(*layers)

    def forward(self, z):
        return self.net(z)


class Discriminator(nn.Module):
    """MLP critic: composition vector [b, nz] -> flat per-sample score in (0, 1)."""

    def __init__(self):
        super(Discriminator, self).__init__()

        widths = [nz, 512, 1024, 512]
        layers = []
        for in_feat, out_feat in zip(widths, widths[1:]):
            layers.append(nn.Linear(in_feat, out_feat))
            layers.append(nn.LeakyReLU(0.02, inplace=True))
        layers.append(nn.Linear(widths[-1], 1))
        layers.append(nn.Sigmoid())
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        # flatten [b, 1] -> [b]
        return self.net(x).view(-1)

def weights_init(m):
    """Kaiming-initialize the weights of Linear layers and zero their biases.

    Intended for use with ``module.apply(weights_init)``; non-Linear modules
    are left untouched.
    """
    if not isinstance(m, nn.Linear):
        return
    nn.init.kaiming_normal_(m.weight)
    m.bias.data.fill_(0)

def gradient_penalty(D, xr, xf):
    """WGAN-GP gradient penalty term for the discriminator.

    Interpolates randomly between real and fake batches and penalizes the
    critic when the gradient norm at the interpolates departs from 1.

    :param D: critic module/callable mapping [b, nz] -> [b] scores
    :param xr: real batch [b, nz]
    :param xf: fake (generated) batch [b, nz]
    :return: scalar penalty tensor, weighted by LAMBDA
    """
    LAMBDA = 0.3

    # only constrain the Discriminator: cut both inputs out of G's graph
    xf = xf.detach()
    xr = xr.detach()

    # Per-sample mixing coefficient, broadcast across features.
    # Fixed: derive the batch size from xr instead of the global `batchsz`,
    # so the penalty also works for batches of any size.
    alpha = torch.rand(xr.size(0), 1)
    alpha = alpha.expand_as(xr)

    interpolates = alpha * xr + ((1 - alpha) * xf)
    interpolates.requires_grad_()

    disc_interpolates = D(interpolates)
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones_like(disc_interpolates),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    gp = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gp

def train_model(X):
    """Train the generator/discriminator pair on the composition matrix.

    Runs ``Loops`` epochs; each epoch does 5 critic updates (with gradient
    penalty) followed by one generator update. Both trained modules are
    pickled under data/ and returned.

    :param X: training matrix [n_samples, nz]
    :return: (G, D) trained modules
    """
    G = Generator()
    D = Discriminator()
    G.apply(weights_init)
    D.apply(weights_init)

    optim_G = optim.Adam(G.parameters(), lr=1e-3, betas=(0.5, 0.9))
    optim_D = optim.Adam(D.parameters(), lr=1e-3, betas=(0.5, 0.9))
    starttime = datetime.datetime.now()
    for epoch in range(Loops):
        # 1. train discriminator for k steps
        for _ in range(5):
            # Fixed: sample from 0 (inclusive); the original randint(1, ...)
            # silently excluded the first training sample forever.
            idx = np.random.randint(0, X.shape[0], batchsz)
            xr = torch.tensor(X[idx, :], dtype=torch.float)

            # [b] critic score on real data; maximize it -> minimize -mean
            predr = (D(xr))
            lossr = - (predr.mean())

            # [b, nz] fake batch; stop gradient on G while updating D
            z = torch.randn(batchsz, nz)
            xf = G(z).detach()
            predf = (D(xf))
            # minimize the critic's score on fakes
            lossf = (predf.mean())

            # gradient penalty keeps D approximately 1-Lipschitz
            gp = gradient_penalty(D, xr, xf)

            loss_D = lossr + lossf + gp
            optim_D.zero_grad()
            loss_D.backward()
            optim_D.step()

        # 2. train Generator to raise the critic's score on fakes
        z = torch.randn(batchsz, nz)
        xf = G(z)
        predf = (D(xf))
        loss_G = - (predf.mean())
        optim_G.zero_grad()
        loss_G.backward()
        optim_G.step()

        if epoch % 200 == 0:
            print(f'D loss {loss_D.item()}, G loss {loss_G.item()}')

    endtime = datetime.datetime.now()
    print(endtime - starttime)

    torch.save(G, 'data/g_model')
    # Fixed: the original saved G twice; d_model now actually holds D.
    torch.save(D, 'data/d_model')
    return G, D

def valid_metal(xg, xt):
    """Print non-metal content statistics for generated (xg) and training (xt) data.

    For each non-metal element, prints "avg max min" of its fraction over the
    alloys that contain any non-metal; an extra final bucket holds the total
    non-metal fraction per alloy. Also draws the grouped histogram figure.
    """
    non_metal = ['B', 'P', 'S', 'Si', 'C', 'F', 'H']

    non_metal_idx = [elements[e] for e in non_metal]
    print(elements)
    print(non_metal_idx)

    def summarize(X):
        # one bucket per non-metal element plus one for the per-alloy total
        buckets = [[] for _ in range(len(non_metal) + 1)]
        sub = X[:, non_metal_idx]

        print(len(buckets))
        for row in sub:
            if row.max() > 0:
                for col in range(len(row)):
                    if row[col] > 0.0:
                        buckets[col].append(row[col])
                buckets[len(row)].append(row.sum())

        for bucket in buckets:
            if len(bucket) == 0:
                print(f'0 0 0')
            else:
                arr = np.array(bucket)
                print(f'{arr.sum()/len(arr)} {arr.max()} {arr.min()}')

    summarize(xt)
    summarize(xg)
    fig.group_histogram1()

# How much of X1's data also appears in X2 (alloy / alloy-system overlap).
def compare(X1, X2):
    """Count, for each row of X2, whether a matching row exists in X1.

    Two rows are the "same alloy system" when their non-zero element sets
    coincide, and the "same alloy" when additionally every component differs
    by at most 0.001. Prints running overlap percentages at the checkpoints
    in ``print_list`` and at the end.

    NOTE(review): the percentages divide by ``i_g`` rather than ``i_g + 1``
    (off by one, and a zero division if the first checkpoint were index 0) —
    confirm intended denominator.
    """
    start = time.time()
    # self-comparison mode: when X1 and X2 have equal length, assume X1 is X2
    # and only scan pairs i_t > i_g to avoid matching a row with itself
    self_cmp = len(X1)==len(X2)

    print_list = [10000, 15000, 20000, 25000, 30000, 35000, 40000, 50000]
    # precompute non-zero counts of X1 rows (cheap first-pass filter)
    x1_nonzero = []
    for i in range(len(X1)):
        alloy = X1[i]
        x1_nonzero.append(np.count_nonzero(alloy))

    same_alloy_system = 0
    same_alloy = 0
    for i_g in range(len(X2)):
        same_alloy_system_tag = False
        same_alloy_tag = False
        st = i_g + 1 if self_cmp else 0
        for i_t in range(st, len(X1)):
            # same number of non-zero elements
            if i_t == i_g:
                continue
            alloy_g = X2[i_g]
            alloy_t = X1[i_t]

            x2_nonzero_alloy = np.count_nonzero(alloy_g)
            if x2_nonzero_alloy == x1_nonzero[i_t]:
                # element-wise product non-zero count matches -> same element set
                if x2_nonzero_alloy == np.count_nonzero(np.multiply(alloy_g, alloy_t)):
                    same_alloy_system_tag = True
                    same_alloy_tag = True
                    for i in range(len(alloy_t)):
                        if abs(alloy_t[i] - alloy_g[i]) > 0.001:
                            same_alloy_tag = False
                            break

        if same_alloy_tag:
            same_alloy = same_alloy + 1
        if same_alloy_system_tag:
            same_alloy_system = same_alloy_system+1

        if i_g+1 in print_list or i_g+1==len(X2):
            dif_alloy = i_g - same_alloy
            dif_alloy_sys = i_g - same_alloy_system
            print(f'Total G data {i_g}, different system {dif_alloy_sys}, different {dif_alloy}')
            print(f'different alloy system {dif_alloy_sys/i_g*100}%, different alloy {dif_alloy/i_g*100}%')
            print("运行时间：", time.time() - start, "秒")

# Count how many binary/ternary/... alloys are present in the data.
def data_statistics(xf):
    """Histogram the number of significant elements (> 0.005 after renormalization) per alloy.

    Prints an nz-long count vector where slot k is the number of alloys with
    k significant elements. Uses the globals ``nz`` and ``elements``.
    """
    element_counts = np.zeros(nz)
    symbols = list(elements)
    for alloy in xf:
        # collect components above the noise floor
        comp_names = []
        comps = []
        for i in range(len(alloy)):
            if alloy[i] > 0.005:
                comp_names.append(symbols[i])
                comps.append(alloy[i])
        comps = np.array(comps)
        # renormalize so the kept components sum to 1
        comps = comps / comps.sum()
        significant = dict()
        for name, frac in zip(comp_names, comps):
            if frac > 0.005:  # threshold again after renormalization
                significant[name] = frac
        element_counts[len(significant)] = element_counts[len(significant)] + 1
    print(f'generated alloy {element_counts.astype(np.uint32)}')

# Compare how much of the specified elements training vs generated alloys contain,
# and dump the per-sample totals to a CSV for plotting.
def write_element_composition_csv(xt, xg):
    """Write per-sample totals of ``specified_elements`` for training (xt) and generated (xg) data.

    Output goes to pic/<elements>.csv with columns Percentage / Data source / Element.

    NOTE(review): relies on a module-level ``specified_elements`` that is not
    defined in this file — confirm where it is supposed to come from.
    """
    def element_composition(X):
        alloy_system_t = np.zeros(nz)
        target_idx = [elements[e] for e in specified_elements]
        print(alloy_system_t)
        res = np.zeros(len(X))
        for i in range(len(X)):
            # total fraction of the specified elements in sample i
            for idx in target_idx:
                res[i] = res[i] + X[i][idx]
        print(f'{specified_elements} in each alloy system {res}')
        return res

    xt_spec = element_composition(xt)
    xg_spec = element_composition(xg)

    print(xt_spec.shape)
    print(xg_spec.shape)
    x = np.hstack((xt_spec, xg_spec)).reshape(-1, 1)
    source = np.hstack((np.full(len(xt_spec), 'Training'), np.full(len(xg_spec), 'Generated'))).reshape(-1, 1)
    system = np.full(len(xt_spec) + len(xg_spec), ''.join(specified_elements)).reshape(-1, 1)
    print(x.shape)
    print(source.shape)
    print(system.shape)

    xyz = np.hstack((x, source, system))
    out_data = pd.DataFrame(data=xyz, columns=["Percentage", "Data source", "Element"])
    out_data.to_csv('pic/' + ''.join(specified_elements) + '.csv', index=False)


# Pick `target_size` samples of a specific alloy system from generated data.
def generate_alloy_system_data(X, G, alloy_system, target_size):
    """Sample G until ``target_size`` alloys of exactly ``alloy_system`` are collected, then plot.

    :param X: reference composition data used to extract the true samples of the same system
    :param G: trained generator
    :param alloy_system: list of element symbols, e.g. ['Cu', 'Ti', 'Zr'] —
        NOTE(review): this list is mutated below ('-test' appended), which
        leaks back to the caller; confirm that is intended
    :param target_size: number of matching generated samples required
    """
    generated_target_alloy = []
    # binary mask of the requested system over the nz element columns
    alloy_system_t = np.zeros(nz)
    for e in alloy_system:
        alloy_system_t[elements[e]] = 1
    print(alloy_system_t)
    print(elements)
    while len(generated_target_alloy) < target_size:
        z = torch.randn(12000, nz)
        xf = G(z).detach().numpy()
        for alloy in xf:
            # zero out trace components below the noise floor
            for i in range(len(alloy)):
                if alloy[i] < 0.005:
                    alloy[i] = 0.0
            # keep only alloys whose non-zero set is exactly the requested system
            if np.count_nonzero(alloy_system_t) == np.count_nonzero(alloy):
                if np.count_nonzero(alloy_system_t) == np.count_nonzero(np.multiply(alloy_system_t, alloy)):
                    generated_target_alloy.append(alloy)
        # give up if the very first 12000-sample batch produced nothing
        if len(generated_target_alloy) == 0:
            print(f"{alloy_system} generate 12000 sample, no target alloy system")
            return

    data_statistics(generated_target_alloy)
    # Visualize the target sample distribution of alloy system
    # NOTE(review): len(...) can never be -1, so this condition is always
    # true — confirm the intended guard (perhaps a specific arity?).
    if len(alloy_system) != -1:
        idx_g = []
        for i in range(0, len(alloy_system)):
            idx_g.append(elements[alloy_system[i]])
        x_g = np.array(generated_target_alloy)[:, idx_g]

        # collect the true samples of the same system from X for comparison
        true_alloy = []
        for alloy in X:
            if np.count_nonzero(alloy_system_t) == np.count_nonzero(alloy):
                if np.count_nonzero(alloy_system_t) == np.count_nonzero(np.multiply(alloy_system_t, alloy)):
                    true_alloy.append(alloy)
        x_t = np.array(true_alloy)[:, idx_g]

        # ternary systems get a dedicated ternary scatter plot
        if len(alloy_system) == 3:
            random.shuffle(x_g)
            alloy_system.append('-test')
            fig.ternary_scatter_px(x_t, x_g[:target_size], alloy_system)

        # X_comb = np.vstack((x_t, x_g))
        # tsne = manifold.TSNE(n_components=2, init='pca', random_state=2022)
        # X_fig = tsne.fit_transform(X_comb)
        # fig.scatter2d(X_fig[:, 0], X_fig[:, 1], x_t.shape[0], ''.join(alloy_system), True)

def set_elements():
    """Populate the global ``elements`` dict from data/elements.csv.

    Maps the first CSV column (element symbol) to the last column (its index).
    """
    table = pd.read_csv("data/elements.csv", delimiter=r",")
    symbols = table.values[:, 0:1].ravel()
    indices = table.values[:, -1].ravel()
    for sym, idx in zip(symbols, indices):
        elements[sym] = idx

def post_process(xf):
    """Clean generated compositions in place: zero trace values (< 0.005);
    in alloy-system mode (global ``only_alloy_system``) binarize the rest to 1.0.

    :param xf: 2-D array of generated compositions (mutated in place)
    :return: the same array, for chaining
    """
    for row in xf:
        for j in range(len(row)):
            if row[j] < 0.005:
                row[j] = 0.0
            elif only_alloy_system:
                row[j] = 1.0
    return xf

# Test the ability to generate previously unseen alloy systems.
def test_new_alloy_system(X):
    """Hold out each target alloy system, retrain, and try to regenerate it.

    For every target system: rows of that system are split off as the test
    set, a fresh GAN is trained on the remainder, and 200 samples of the
    held-out system are requested from the generator.

    NOTE(review): X is reassigned each iteration, so later systems train on
    data with all earlier systems already removed (cumulative shrinking) —
    confirm that is intended.
    """
    target_alloy_systems = np.array([
        ['Cu','Ti','Zr'], ['B','Co','Fe']
    ])
    # alloy_system_data = pd.read_csv("data/ternary_alloy_system.csv", delimiter=r",")
    # target_alloy_systems = alloy_system_data.values[:, :]
    for target_alloy_system in target_alloy_systems:
        X, x_test = split_test_alloy_system(X, target_alloy_system)
        G, D = train_model(X)
        generate_alloy_system_data(x_test, G, target_alloy_system.tolist(), 200)

def normal_test(X):
    """Standard pipeline: obtain (or load) the GAN, generate G_size samples, evaluate.

    Evaluation here runs the xgboost surrogate predictions and prints element
    count statistics for both generated and training data; optional t-SNE
    visualisation when ``Vis`` is set.

    NOTE(review): torch.load of whole pickled modules requires the Generator /
    Discriminator classes to be importable at load time; also consider
    map_location if models were saved on GPU.
    """
    if Trained:
        G = torch.load('data/g_model')
        D = torch.load('data/d_model')
    else:
        G, D = train_model(X)

    # draw latent samples and clean up the generated compositions
    z = torch.randn(G_size, nz)
    xf = G(z).detach().numpy()
    xf = post_process(xf)

    # surrogate property predictions on the generated alloys
    xgboost_am.predict(xf)
    xgboost_dmax.predict(xf)

    # valid_metal(xf, X)
    #generate_alloy_system_data(X, G, ['B', 'Co', 'Mn'], 200)
    # generate_alloy_system_data(X, G, ['Al', 'Ni', 'Zr'], 200)
    # generate_alloy_system_data(X, G, ['Mg', 'Nd', 'Ni'], 200)

    # fig.line([], [], [], [])
    # compare(X, xf)

    data_statistics(xf)
    data_statistics(X)

    if Vis:
        # 2-D t-SNE of training + generated data, split by X.shape[0]
        X_comb = np.vstack((X, xf))
        # tsne = manifold.TSNE(n_components=3, init='pca', random_state=0)
        # X_fig = tsne.fit_transform(X_comb)
        # fig.scatter3d(X_fig[:, 0], X_fig[:, 1], X_fig[:, 2], X.shape[0])

        tsne = manifold.TSNE(n_components=2, init='pca', random_state=2022)
        X_fig = tsne.fit_transform(X_comb)
        fig.scatter2d(X_fig[:, 0], X_fig[:, 1], X.shape[0], pic_label, False)

# ---- module-level configuration ----
device = "cpu" #torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
h_dim = 300                 # hidden width (not referenced in this file)
nz = 57                     # feature / latent dimension (number of element columns)
batchsz = 256               # minibatch size for GAN training
elements = dict()           # element symbol -> column index (filled by set_elements / load_data)
alloy_system_list = dict()  # alloy system name -> 1.0, recorded while loading data

Vis = False                 # if True, draw t-SNE visualisation in normal_test
Trained = True              # if True, load pre-trained models instead of training
only_1888 = False           # if True, train only on 1877-am.csv
G_size = 8000               # number of samples to generate
Loops = 3000                # GAN training epochs
pic_label = 'alloy_system_1888_1000'  # label used for the output figure
only_alloy_system = False   # if True, encode presence (1/0) instead of composition
DATA_PATH = "data/gan/"     # directory containing the training CSVs

def main():
    """Build the element table and dataset, then run the standard generate/evaluate pipeline."""
    set_elements()
    X = generate_dataset()
    normal_test(X)
    #test_new_alloy_system(X)


# script entry point
if __name__ == '__main__':
    main()









