import torch as th
import torch.nn as nn
import torch.nn.functional as F
from feat_CNN import getFeat


class Linear(nn.Module):
    """Three-layer MLP: input_dim -> 20 -> 20 -> output_dim with ReLU in between.

    The final layer emits raw logits (no activation) for use with
    ``nn.CrossEntropyLoss``.
    """

    def __init__(self, input_dim, output_dim):
        super(Linear, self).__init__()
        self.linear1 = nn.Linear(input_dim, 20)
        self.linear2 = nn.Linear(20, 20)
        self.linear3 = nn.Linear(20, output_dim)

    def forward(self, x):
        # Two hidden layers with ReLU; logits come straight out of linear3.
        hidden = th.relu(self.linear1(x))
        hidden = th.relu(self.linear2(hidden))
        return self.linear3(hidden)


class Model(nn.Module):
    """Wraps the ``Linear`` MLP with full-batch train / test / inference loops.

    The three output classes map to labels 'positive' (index 0),
    'neutral' (index 1) and 'negative' (index 2).
    """

    def __init__(self, input_dim, output_dim):
        super(Model, self).__init__()
        self.learning_rate = 0.3
        self.n_epoch = 100000  # number of full-batch gradient steps
        self.loss_fcn = nn.CrossEntropyLoss()
        self.net = Linear(input_dim, output_dim)
        self.optimizer = th.optim.Adam(self.net.parameters(), lr=self.learning_rate)

    def train(self, feature, label):
        """Full-batch training loop over ``self.n_epoch`` steps.

        NOTE(review): this intentionally shadows ``nn.Module.train``; the
        caller relies on the (feature, label) signature, so the name stays.
        """
        for epoch in range(self.n_epoch):
            prediction = self.net(feature)
            loss = self.loss_fcn(prediction, label)

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # Decay the learning rate every 10 epochs.  The original divided
            # by ``epoch`` at epoch 0 (ZeroDivisionError) and never handed the
            # new rate to the optimizer; both defects are fixed here.
            if epoch % 10 == 0 and epoch > 0:
                self.learning_rate = self.learning_rate * 10 / epoch
                for group in self.optimizer.param_groups:
                    group['lr'] = self.learning_rate

    def test(self, feature, label):
        """Print the current loss on (feature, label)."""
        prediction = self.net(feature)
        loss = self.loss_fcn(prediction.squeeze(), label)
        print('loss = {:.4f}'.format(loss))

    def exam(self, feature):
        """Write one predicted label per input row to ``submission.txt``."""
        prediction = self.net(feature).cpu()
        print('len = {}'.format(len(prediction)))
        # ``with`` guarantees the file is closed even if iteration raises.
        with open('submission.txt', 'w') as file:
            for pre in prediction:
                # Highest logit wins; ties resolve to the lowest index,
                # matching the comparison order below.
                if max(pre) == pre[0]:
                    print('positive', file=file)
                elif max(pre) == pre[1]:
                    print('neutral', file=file)
                else:
                    print('negative', file=file)


class textCNN(nn.Module):
    """Kim-style CNN text classifier.

    Pipeline: embedding lookup -> parallel 2-D convolutions (one per kernel
    size) -> ReLU -> max-over-time pooling -> dropout -> linear classifier.
    """

    def __init__(self, embed_num, embed_dim, class_num, kernel_num, kernel_sizes, dropout):
        super(textCNN, self).__init__()

        # Randomly initialised word embeddings: one embed_dim vector per
        # known word (embed_num words in the vocabulary).
        self.embed = nn.Embedding(embed_num, embed_dim)
        # One Conv2d per kernel size over a single input channel, each
        # producing kernel_num feature maps spanning the full embedding width.
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, kernel_num, (k, embed_dim)) for k in kernel_sizes]
        )
        self.dropout = nn.Dropout(dropout)
        # Fully connected layer mapping the pooled features to class logits.
        self.fc = nn.Linear(len(kernel_sizes) * kernel_num, class_num)

    def forward(self, x):
        embedded = self.embed(x)          # (N, W, D)
        embedded = embedded.unsqueeze(1)  # (N, 1, W, D) — add channel dim

        pooled = []
        for conv in self.convs:
            feat = F.relu(conv(embedded)).squeeze(3)            # (N, Knum, W')
            feat = F.max_pool1d(feat, feat.size(2)).squeeze(2)  # (N, Knum)
            pooled.append(feat)

        combined = th.cat(pooled, 1)      # (N, Knum * len(kernel_sizes))
        return self.fc(self.dropout(combined))


import os
import sys
import torch.autograd as autograd


def train(train_iter, model, cuda, device, lr, epochs):
    """Train ``model`` on ``train_iter`` batches with Adam.

    Args:
        train_iter: iterable of batches exposing ``.text`` (W, N),
            ``.labels`` (N) and ``.batch_size`` — TODO confirm against the
            iterator produced by ``getFeat``.
        model: classifier mapping (N, W) index tensors to (N, C) logits.
        cuda: move model and tensors to GPU when True.
        device: CUDA device index passed to ``model.cuda``.
        lr: initial Adam learning rate.
        epochs: number of passes over ``train_iter``.
    """
    if cuda:
        model.cuda(device)

    optimizer = th.optim.Adam(model.parameters(), lr=lr)

    steps = 0
    model.train()
    print('training...')
    for epoch in range(1, epochs + 1):
        for batch in train_iter:
            feature, target = batch.text, batch.labels  # (W, N), (N)
            # Transpose to (N, W) WITHOUT mutating the iterator's tensor —
            # the original in-place t_() flipped the same tensor back on the
            # next epoch when the iterable is reused.
            feature = feature.t()

            if cuda:
                feature, target = feature.cuda(), target.cuda()

            optimizer.zero_grad()
            logit = model(feature)
            loss = F.cross_entropy(logit, target)
            loss.backward()
            optimizer.step()

            steps += 1
            if steps % 100 == 0:
                # Running accuracy on this batch for the progress line.
                result = th.max(logit, 1)[1].view(target.size())
                corrects = (result.data == target.data).sum()
                accuracy = corrects * 100.0 / batch.batch_size
                sys.stdout.write('\rEpoch[{}] Batch[{}] - loss: {:.6f} acc: {:.4f}$({}/{})'.format(
                                                                                         epoch,
                                                                                         steps,
                                                                                         loss.data.item(),
                                                                                         accuracy,
                                                                                         corrects,
                                                                                         batch.batch_size))

        # Decay once per epoch.  The original recomputed this on every batch
        # of a decay epoch and never handed the new rate to the optimizer.
        if epoch % 10 == 0:
            lr = lr * 10 / epoch
            for group in optimizer.param_groups:
                group['lr'] = lr


# NOTE(review): dead code — a commented-out checkpoint helper, kept for
# reference; it pairs with the disabled dev-eval block inside train().
'''def save(model, save_dir, save_prefix, steps):
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    save_prefix = os.path.join(save_dir,save_prefix)
    save_path = '{}_steps_{}.pt'.format(save_prefix,steps)
    torch.save(model.state_dict(),save_path)'''


def eval(data_iter, model, cuda):
    """Run inference over ``data_iter`` and write one label per sample
    ('positive'/'neutral'/'negative') to ``submission.txt``.

    NOTE(review): the name shadows the builtin ``eval``; kept because the
    script entry point calls it by this name.
    """
    model.eval()
    # no_grad: inference only — the original built autograd graphs for every
    # batch.  ``with open`` also guarantees the file closes on any error.
    with open('submission.txt', 'w') as file, th.no_grad():
        for batch in data_iter:
            # (W, N) -> (N, W) without mutating the iterator's tensor
            # (the original in-place t_() modified batch.text).
            feature = batch.text.t()

            if cuda:
                feature = feature.cuda()

            logit = model(feature)

            for pre in logit:
                # Highest logit wins; ties resolve to the lowest index,
                # matching the comparison order below.
                if max(pre) == pre[0]:
                    print('positive', file=file)
                elif max(pre) == pre[1]:
                    print('neutral', file=file)
                else:
                    print('negative', file=file)


if __name__ == '__main__':
    # Build train/test iterators and vocabulary stats from the project's
    # feature module, then train and evaluate the CNN text classifier.
    train_iter, test_iter, embed_num, embed_dim = getFeat()
    model = textCNN(embed_num=embed_num,
                    embed_dim=embed_dim,
                    class_num=3,
                    kernel_num=3,
                    kernel_sizes=[2,3,4],
                    dropout=0.1)
    # NOTE(review): hard-coded CUDA device 0 — fails on CPU-only hosts.
    model.cuda(0)
    train(train_iter=train_iter,
          model=model,
          cuda=True,
          device=0,
          lr=0.05,
          epochs=5)
    # Writes predictions to submission.txt.
    eval(data_iter=test_iter,
         model=model,
         cuda=True)


    # Dead code below: an earlier MLP-based pipeline (Model/Linear above),
    # kept for reference.
    '''print('start import data...')
    feat, label ,test= getFeat()

    feat = th.tensor(feat).float().to(1)
    label = th.tensor(label).long().to(1)

    print('start training...')
    model = Model(len(feat[0]), 3)
    model.cuda(1)
    model.train(feat, label)


    test = th.tensor(test).float().to(1)
    print('start examing...')
    model.exam(test)'''


