import codecs
import math
import pickle
import random
import sys
from time import time
from turtle import forward
from matplotlib.style import use
import numpy as np
import torch
from torch.utils.data import Dataset,DataLoader
import torch.nn as nn

class embedding_net(nn.Module):
    """Token-id -> scaled dense embedding lookup.

    The embedding vectors are multiplied by sqrt(emb_size), the scaling
    used in "Attention Is All You Need" to keep embedding magnitudes
    comparable across embedding sizes.
    """
    def __init__(self, vocab_size=None, emb_size=None) -> None:
        """Build the lookup table.

        vocab_size/emb_size default to the module-level vocabulary
        (len(item2id)) and the configured LOAD.con.word_emb_size, which
        preserves the original zero-argument construction.
        """
        super().__init__()
        if vocab_size is None:
            vocab_size = len(item2id)
        if emb_size is None:
            emb_size = LOAD.con.word_emb_size
        self.lut = nn.Embedding(vocab_size, emb_size)
        self.word_embsize = emb_size

    def forward(self, x):
        # Scale the raw lookup by sqrt(d) as in the Transformer paper.
        return self.lut(x) * math.sqrt(self.word_embsize)
class net_no_crf(nn.Module):
    """Three stacked gated (GLU-style) 1-D conv layers followed by a
    1x1 conv projecting the 50 channels down to 4 tag logits.

    Expects input of shape (batch, channels=50, seq_len); kernel_size=3
    with padding=1 preserves seq_len, so the output is (batch, 4, seq_len).
    """
    def __init__(self):
        super().__init__()
        self.Sigmoid = nn.Sigmoid()
        self.conv1_left = nn.Conv1d(in_channels=50, out_channels=50, kernel_size=3, padding=1)
        self.conv1_right = nn.Conv1d(in_channels=50, out_channels=50, kernel_size=3, padding=1)
        self.conv2_left = nn.Conv1d(in_channels=50, out_channels=50, kernel_size=3, padding=1)
        self.conv2_right = nn.Conv1d(in_channels=50, out_channels=50, kernel_size=3, padding=1)
        self.conv3_left = nn.Conv1d(in_channels=50, out_channels=50, kernel_size=3, padding=1)
        self.conv3_right = nn.Conv1d(in_channels=50, out_channels=50, kernel_size=3, padding=1)
        self.end = nn.Conv1d(in_channels=50, out_channels=4, kernel_size=1)

    def forward(self, input):
        # Each stage is a gated linear unit: linear path * sigmoid(gate path).
        # BUG FIX: the original assigned the stage-2 and stage-3 gates to
        # output1_r, so output2/output3 were multiplied by the raw
        # (un-sigmoided) gate-conv output and the gating never applied.
        gate1 = self.Sigmoid(self.conv1_right(input))
        output1 = self.conv1_left(input) * gate1

        gate2 = self.Sigmoid(self.conv2_right(output1))
        output2 = self.conv2_left(output1) * gate2

        gate3 = self.Sigmoid(self.conv3_right(output2))
        output3 = self.conv3_left(output2) * gate3

        return self.end(output3)

LOAD = __import__('load')
# Read the training and test corpora and build the item/tag vocabularies.
# Context managers close the corpus handles after parsing (the original
# never closed them); assumes read_train_file fully consumes the file
# during the call — TODO confirm against load.py.
with codecs.open(LOAD.con.training_path, 'r', 'utf8') as train_file:
    train_data = LOAD.read_train_file(train_file, word_window=LOAD.con.word_window)
with codecs.open(LOAD.con.test_path, 'r', 'utf8') as test_file:
    test_data = LOAD.read_train_file(test_file, word_window=LOAD.con.word_window)
item2id, id2item = LOAD.create_mapping(LOAD.create_dic(train_data[0], add_unk=True, add_pad=True))
tag2id, id2tag = LOAD.create_mapping(LOAD.create_dic(train_data[-1]))
###########################################################################
# Persist the four vocab/tag mappings so inference code can reload them;
# `with` guarantees the handle is closed even if pickling raises.
with open('data/convertchart.data', 'wb') as load_file:
    pickle.dump([item2id, id2item, tag2id, id2tag], file=load_file)
print('data load complete')
#######################   init word embedding    #########################

# Pre-compute one embedding tensor per training sentence. net_emb is never
# handed to the optimizer, so its weights stay at their random init;
# torch.no_grad() keeps the cached tensors free of autograd graphs (the
# original cached graph-carrying tensors, forcing retain_graph=True in the
# training loop and accumulating unused gradients).
net_emb = embedding_net()
idx_embedding = []
idx_tag = []
with torch.no_grad():
    for word_list in train_data[0]:
        # NOTE(review): words missing from item2id are silently dropped,
        # so a sentence's embedding can end up shorter than its tag list —
        # confirm the vocabulary (with add_unk) covers the training set.
        word_ids = [item2id[word] for word in word_list if word in item2id]
        idx_embedding.append(net_emb(torch.LongTensor(word_ids)))

for tag_list in train_data[-1]:
    tag_ids = [tag2id[tag] for tag in tag_list if tag in tag2id]
    idx_tag.append(torch.tensor(tag_ids))
#################################  ###############################################
print('embedding load complete')
##############################################################################
class wordsets(Dataset):
    """Dataset pairing per-sentence embedding tensors with tag-id tensors.

    Defaults to the module-level precomputed idx_embedding / idx_tag lists
    (preserving the original zero-argument construction) but accepts
    explicit lists for reuse and testing.
    """
    def __init__(self, data_word=None, data_tag=None) -> None:
        super().__init__()
        self.data_word = idx_embedding if data_word is None else data_word
        self.data_tag = idx_tag if data_tag is None else data_tag

    def __getitem__(self, index):
        return self.data_word[index], self.data_tag[index]

    def __len__(self):
        # BUG FIX: originally returned len(idx_embedding) (the global),
        # which is wrong for any instance holding different data.
        return len(self.data_word)
train_set = wordsets()
train_loader = DataLoader(train_set, shuffle=True)
len_train = len(train_set)

net_N = net_no_crf()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net_N.parameters(), lr=0.01)
# Fall back to CPU when CUDA is absent (the original .cuda() calls crashed
# on GPU-less machines).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net_N = net_N.to(device)
print('start training plz wait')
for epoch in range(LOAD.con.max_epoches):
    start_time = time()
    for j, (words_id, tags_id) in enumerate(train_loader):
        # (batch, seq, emb) -> (batch, channels=emb, seq) as Conv1d expects.
        words_id = words_id.permute(0, 2, 1).to(device)
        tags_id = tags_id.to(device)
        if words_id.shape[2] >= 3:
            # One zero_grad is enough: the optimizer holds exactly
            # net_N's parameters (original also called net_N.zero_grad()).
            optimizer.zero_grad()
            predict = net_N(words_id)
            loss = criterion(predict, tags_id)
            # retain_graph=True because the cached sentence embeddings may
            # carry an autograd graph that is reused across epochs; freeing
            # it on the first backward would break later epochs.
            loss.backward(retain_graph=True)
            optimizer.step()
        if j % 500 == 0:
            print('sentence ', j, '/', len_train, ' complete ',
                  j / (time() - start_time), ' sentences per second')

    used_time = time() - start_time
    print('epoch ' + str(epoch) + ' complete,used ' + str(used_time) + 's')

# NOTE(review): torch.save on whole modules pickles the class definitions;
# kept for compatibility with whatever script reloads these files.
torch.save(net_N, 'net_N.pt')
torch.save(net_emb, 'net_emb.pt')
print('model train complete')




    


 
        
        
        
        
