import torch
import torch.nn as nn
from torch.optim import Adam
from torch.nn import CrossEntropyLoss
import random
from torch.utils.data import Dataset, DataLoader
from collections import Counter
from sklearn import tree
from sklearn.model_selection import train_test_split
import time
import numpy as np
import graphviz
from tqdm import tqdm


# Fix all global RNG seeds so runs are reproducible. NumPy must be seeded as
# well: sklearn's train_test_split below draws from numpy's global RNG, so
# without this line the train/test split differs on every run.
seed = 666
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)


def getLabel(a, b, c):
    """Return a class label in 0..6 for the parabola y = a*x^2 + b*x + c.

    The label encodes which quadrants the curve passes through (the exact
    quadrant sets are spelled out by the ``class_names`` list used for the
    decision-tree plot in ``__main__``). Labels 0-3 are used when a > 0 and
    3-6 when a < 0; label 3 is shared (curve crosses all four quadrants).

    Returns None when a == 0, i.e. the input is not a quadratic — callers
    are expected to filter that case out.
    """
    disc = b * b - 4 * a * c  # discriminant decides whether real roots exist
    if a > 0:
        if c < 0:
            # Opens upward with a negative intercept: all four quadrants.
            return 3
        if b == 0 or (c > 0 and disc <= 0):
            return 0
        return 1 if b > 0 else 2
    if a < 0:
        if c > 0:
            # Opens downward with a positive intercept: all four quadrants.
            return 3
        if b == 0 or (c < 0 and disc <= 0):
            return 4
        return 5 if b > 0 else 6


def generateData(size):
    """Randomly sample `size` quadratics and label them with getLabel.

    Each sample is the feature row [a, b, c, b^2 - 4ac]; b and c are forced
    to 0 with 10% probability each so the degenerate branches of getLabel
    are well represented. Prints the label distribution as a side effect.

    Returns (data_list, label_list) as parallel Python lists.
    """
    samples = []
    labels = []
    for _ in range(size):
        a = 0
        while a == 0:  # a must be nonzero for a genuine quadratic
            a = random.randint(-100, 100)
        b = 0 if random.random() < 0.1 else random.randint(-200, 200)
        c = 0 if random.random() < 0.1 else random.randint(-100, 100)
        samples.append([a, b, c, b * b - 4 * a * c])
        labels.append(getLabel(a, b, c))
    print(Counter(labels))
    return samples, labels


def generateSpecificData(upper_bound):
    """Exhaustively enumerate integer quadratics on a small grid.

    Covers a in [-upper_bound, upper_bound) excluding 0, b in
    [-4*upper_bound, 4*upper_bound), c in [-upper_bound, upper_bound).
    Each row is [a, b, c, b^2 - 4ac]; labels come from getLabel.
    Prints the label distribution as a side effect.

    Returns (data_list, label_list) as parallel Python lists.
    """
    rows = []
    labels = []
    coefficients_a = [a for a in range(-upper_bound, upper_bound) if a != 0]
    for a in coefficients_a:
        for b in range(-4 * upper_bound, 4 * upper_bound):
            for c in range(-upper_bound, upper_bound):
                rows.append([a, b, c, b * b - 4 * a * c])
                labels.append(getLabel(a, b, c))
    print(Counter(labels))
    return rows, labels


class FunctionModel(nn.Module):
    """Two-layer MLP: 4 input features -> 100 hidden (sigmoid) -> 7 logits.

    Inputs are the feature rows [a, b, c, b^2-4ac]; outputs are raw class
    scores intended for CrossEntropyLoss (no softmax applied here).
    """

    def __init__(self):
        super().__init__()
        # Same layer construction order as before, so parameter
        # initialization consumes the RNG identically.
        self.net = nn.Sequential(
            nn.Linear(4, 100),
            nn.Sigmoid(),
            nn.Linear(100, 7),
        )

    def forward(self, x):
        return self.net(x)


class MyDataSet(Dataset):
    """Minimal Dataset pairing indexable features with indexable labels."""

    def __init__(self, data, label):
        super().__init__()
        # Stored as-is; assumed to be equal-length indexables (e.g. tensors).
        self.data = data
        self.label = label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Yield one (features, label) pair.
        return self.data[index], self.label[index]


if __name__ == '__main__':
    # ---- Neural-network approach ----
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    train_x, train_y = generateData(50000)
    train_x = torch.tensor(train_x, dtype=torch.float, device=device)
    train_y = torch.tensor(train_y, dtype=torch.long, device=device)
    test_x, test_y = generateData(20000)
    test_x = torch.tensor(test_x, dtype=torch.float, device=device)
    test_y = torch.tensor(test_y, dtype=torch.long, device=device)
    dataset = MyDataSet(train_x, train_y)
    dataloader = DataLoader(dataset=dataset, batch_size=128, shuffle=True)
    model = FunctionModel().to(device)
    criterion = CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=1e-3)
    start_time = time.time()
    for epoch in tqdm(range(100)):
        for batch_x, batch_y in dataloader:
            optimizer.zero_grad()
            loss = criterion(model(batch_x), batch_y)
            loss.backward()
            optimizer.step()

    # Evaluate on the held-out random sample. eval() is a no-op for this
    # architecture (no dropout/batchnorm) but is correct inference hygiene.
    model.eval()
    with torch.no_grad():
        predict_label = model(test_x).max(1)[1]
        accuracy = torch.sum(predict_label == test_y).item() / len(test_y)
        print("accuracy:{}".format(accuracy))

    end_time = time.time()
    # NOTE(review): this span covers training *and* evaluation time.
    print("gpu time cost:", end_time - start_time, 's')

    # ---- Decision-tree approach ----
    x, y = generateData(100000)
    x = np.array(x, dtype=float)
    # np.compat.long was removed in NumPy 2.0; use an explicit integer dtype.
    y = np.array(y, dtype=np.int64)
    train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.3)
    clf = tree.DecisionTreeClassifier()
    clf = clf.fit(train_x, train_y)
    score = clf.score(test_x, test_y)
    print(score)
    dot_data = tree.export_graphviz(clf,
                                    feature_names=['a', 'b', 'c', 'b^2-4ac'],
                                    class_names=['一二象限','一二三象限','一二四象限','一二三四象限','三四象限','一三四象限','二三四象限'],
                                    filled=True,
                                    rounded=True)
    graph = graphviz.Source(dot_data)
    # NOTE(review): hard-coded, user-specific output path — parameterize
    # before reusing this script elsewhere.
    graph.render(r'/home/zh/lyq/nlp/project/quadraticFunction/tree')
    print(clf.feature_importances_)









