import numpy as np
from torch import nn
from torch.utils import data
import torch
import jieba


# 加载词表
def build_vocab(vocab_path):
    """Build a token-to-id mapping from a vocabulary file (one token per line).

    Id 0 is reserved for padding ('pad'); the last id is reserved for
    out-of-vocabulary tokens ('unk').
    """
    vocab = {'pad': 0}
    with open(vocab_path, 'r', encoding='utf-8') as f:
        for token_id, row in enumerate(f, start=1):
            vocab[row.strip()] = token_id
    # 'unk' gets the next free id (= current table size).
    vocab['unk'] = len(vocab)
    return vocab
def sentence_to_sequence(sentence, vocab):
    """Map each character of *sentence* to its vocab id (unknown chars -> 'unk' id)."""
    unk_id = vocab['unk']
    return [vocab.get(ch, unk_id) for ch in sentence]

def sentence_to_label(sentence):
    """Per-character segmentation labels: 1 on the last char of each jieba word, else 0."""
    labels = [0] * len(sentence)
    end = 0
    for word in jieba.cut(sentence):
        end += len(word)
        labels[end - 1] = 1  # mark the word-final character
    return labels

# 生成训练数据集
# Build the training dataset
class MyDataSet(data.Dataset):
    """Dataset of (char-id sequence, word-boundary label) pairs built from a corpus file.

    Each corpus line is encoded with *vocab* and labelled by jieba segmentation,
    then cut/padded to *max_length*.
    """
    def __init__(self, vocab, corpus_path, max_length):
        self.vocab = vocab
        self.corpus_path = corpus_path
        self.max_length = max_length
        self.init()

    def init(self):
        """Read up to 10000 lines from the corpus and precompute tensor pairs."""
        self.data = []
        with open(self.corpus_path, 'r', encoding='utf-8') as f:
            for line in f:
                # BUG FIX: strip the trailing newline; otherwise '\n' was encoded
                # as an 'unk' id and received a (wrong) word-end label.
                line = line.strip()
                if not line:
                    continue  # skip blank lines
                sequence = sentence_to_sequence(line, self.vocab)
                labels = sentence_to_label(line)
                sequence, labels = self.padding(sequence, labels)
                sequence = torch.LongTensor(sequence)
                labels = torch.LongTensor(labels)
                self.data.append([sequence, labels])
                # Cap the dataset at exactly 10000 samples
                # (the original `> 10000` kept 10001).
                if len(self.data) >= 10000:
                    break

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]

    # Truncate or pad both sequence and labels to max_length.
    def padding(self, sequence, labels):
        """Cut to max_length; pad ids with 0 ('pad') and labels with -100 (ignored by loss)."""
        sequence = sequence[:self.max_length]
        sequence = sequence + [0] * (self.max_length - len(sequence))
        labels = labels[:self.max_length]
        labels = labels + [-100] * (self.max_length - len(labels))
        return sequence, labels

class MyModel(nn.Module):
    """Char-level sequence labeller: Embedding -> RNN -> Linear over 2 classes.

    With targets it returns the cross-entropy loss (positions labelled -100
    are ignored); without targets it returns raw per-position logits.
    """
    def __init__(self, vocab, hidden_size, rnn_hidden_size, num_layers):
        super(MyModel, self).__init__()
        # NOTE: attribute names (eme/rnn/linear) are part of the state_dict keys.
        self.eme = nn.Embedding(len(vocab), hidden_size, padding_idx=0)
        self.rnn = nn.RNN(hidden_size, rnn_hidden_size, num_layers, batch_first=True)
        self.linear = nn.Linear(rnn_hidden_size, 2)
        self.loss = nn.functional.cross_entropy

    def forward(self, X, Y=None):
        hidden = self.eme(X)               # (batch, seq) -> (batch, seq, hidden_size)
        hidden, _ = self.rnn(hidden)       # -> (batch, seq, rnn_hidden_size)
        logits = self.linear(hidden)       # -> (batch, seq, 2)
        if Y is None:
            return logits
        # Flatten batch and sequence dims for token-level cross entropy.
        return self.loss(logits.view(-1, 2), Y.view(-1), ignore_index=-100)

def main():
    """Train the segmentation model on corpus.txt and save weights to model.pth."""
    # Hyperparameters
    epoch_num = 10        # number of training epochs
    lr = 1e-3
    batch_size = 20
    hidden_size = 512
    rnn_hidden_size = 1024
    num_layers = 2
    corpus_path = 'corpus.txt'
    max_length = 10

    vocab = build_vocab("chars.txt")
    dataSet = MyDataSet(vocab, corpus_path, max_length)
    dataloader = data.DataLoader(dataSet, batch_size=batch_size, shuffle=True)

    model = MyModel(vocab, hidden_size, rnn_hidden_size, num_layers)
    # Pick the compute device and remember it on the model for batch transfers.
    use_cuda = torch.cuda.is_available()
    model.device = torch.device('cuda:0' if use_cuda else 'cpu')
    if use_cuda:
        model.cuda()

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    for epoch in range(epoch_num):
        model.train()
        epoch_losses = []
        for x, y in dataloader:
            x = x.to(model.device)
            y = y.to(model.device)
            loss = model(x, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_losses.append(loss.item())
        print("=========\n第%d轮平均loss:%f" % (epoch + 1, np.mean(epoch_losses)))
    torch.save(model.state_dict(), 'model.pth')

def predict(model_path, vocab_path, input_strings):
    """Load a trained model and print each input string segmented by predicted boundaries.

    A space is printed after every character the model labels 1 (word end).
    """
    # Configuration must match the one used at training time.
    hidden_size = 512
    rnn_hidden_size = 1024
    num_layers = 2
    vocab = build_vocab(vocab_path)       # build the character table
    model = MyModel(vocab, hidden_size, rnn_hidden_size, num_layers)   # build the model
    # BUG FIX: map_location='cpu' — training saves from CUDA when available,
    # and loading such a checkpoint on a CPU-only machine fails without it.
    model.load_state_dict(torch.load(model_path, map_location='cpu'))
    model.eval()
    for input_string in input_strings:
        # Predict one string at a time.
        x = sentence_to_sequence(input_string, vocab)
        with torch.no_grad():
            result = model(torch.LongTensor([x]))[0]
            result = torch.argmax(result, dim=-1)  # predicted 0/1 sequence
            # Split where the label is 1 and print the segmented text.
            for index, p in enumerate(result):
                if p == 1:
                    print(input_string[index], end=" ")
                else:
                    print(input_string[index], end="")
            print()
if __name__ == '__main__':
    # main()  # uncomment to train first
    # After training, segment these sentences with the saved model.
    input_strings = [
        "同时，国内有望出台，新汽车刺激方案",
        "沪胶后市有望延续强势！",
        "经过两个交易日的强势调整后",
        "昨日上海天然橡胶期货价格再度大幅上扬",
    ]
    predict("model.pth", "chars.txt", input_strings)
