import pickle as pkl
import re
from collections import Counter

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm

# Maximum sentence length in characters; longer sentences are dropped /
# truncated by the data-preparation steps below.
max_len = 32

def build_train_data(file_path, out_path='data/generate_pkl/train_data.pkl', max_len=32):
    """Build the unsegmented training sentences and pickle them.

    Reads *file_path* as UTF-8 text, removes ALL whitespace (spaces, tabs,
    newlines), splits the text into sentences on Chinese punctuation, drops
    sentences longer than *max_len* characters, pickles the resulting list
    to *out_path* and returns it.

    Args:
        file_path: path to the raw training text (e.g. 'data/train.txt').
        out_path: destination for the pickled sentence list.
        max_len: maximum sentence length kept (default 32).

    Returns:
        The list of cleaned sentences.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        text = f.read()

    # Remove every whitespace character up front; after this, punctuation is
    # the only sentence boundary (the \n in the class below is then harmless).
    text = "".join(re.split(r"[\s]", text))

    # Split on Chinese sentence-ending punctuation.
    sentences = re.split(r"[，。！？…；\n]", text)

    # Drop empty fragments and sentences longer than max_len.
    # (Stripping/removing spaces per sentence is unnecessary: all whitespace
    # was already removed above.)
    phrase_expel = [s for s in sentences if s and len(s) <= max_len]

    print("原始集", phrase_expel)
    with open(out_path, 'wb') as f:  # this processed list is the training data
        pkl.dump(phrase_expel, f)
    return phrase_expel

build_train_data("data/test_result.txt")  # build the raw sentence file (text is NOT word-segmented here)

def build_target(file_path, out_path='data/generate_pkl/target.pkl'):
    """Build BMES tag strings from a pre-segmented corpus and pickle them.

    Reads *file_path* as UTF-8 text in which words are separated by spaces,
    splits it into sentences on Chinese punctuation and newlines, and turns
    each sentence into a tag string: 'S' for a single-character word, and
    'B' (begin) / 'M' (middle) / 'E' (end) for multi-character words.

    Args:
        file_path: path to the word-segmented text (space-separated words).
        out_path: destination for the pickled list of tag strings.

    Returns:
        The list of tag strings, one per sentence.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        text = f.read()

    # Split into sentences on punctuation/newlines.  Inner spaces must be
    # kept — they are the word-boundary markers.
    sentences = [s for s in (frag.strip() for frag in re.split(r"[，。！？…；\n]", text)) if s]

    sum_list = []
    for sentence in sentences:
        tags = ''
        for word in sentence.split():  # extract one space-separated word at a time
            if len(word) == 1:
                tags += 'S'  # single-character word
            else:
                # multi-character word: Begin, (len-2) Middles, End
                tags += 'B' + 'M' * (len(word) - 2) + 'E'
        sum_list.append(tags)

    print("目标集", sum_list)
    with open(out_path, 'wb') as f:
        pkl.dump(sum_list, f)
    return sum_list

build_target('data/test_result.txt')  # this file's text IS word-segmented (space-separated words)

def build_vocab_dict(file_path, out_path='data/generate_pkl/vocab.pkl'):
    """Build a character -> index vocabulary from the corpus and pickle it.

    Characters are counted over the whole file (whitespace is ignored) and
    indexed by descending frequency starting at 1 — index 0 is left free for
    padding.  Ties keep first-seen order, matching a stable sort by count.

    Args:
        file_path: path to the corpus text.
        out_path: destination for the pickled vocabulary dict.

    Returns:
        Dict mapping character -> 1-based index (most frequent char is 1).
    """
    char_freq = Counter()
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            # Strip every whitespace character so only real characters count.
            for ch in "".join(re.split(r"[\s]", line)):
                char_freq[ch] += 1

    # most_common() sorts by count descending (stable for ties), so the most
    # frequent character gets the smallest index; indices start at 1.
    vocab_dic2 = {ch: i + 1 for i, (ch, _) in enumerate(char_freq.most_common())}

    print(vocab_dic2)

    with open(out_path, 'wb') as f:
        pkl.dump(vocab_dic2, f)
    return vocab_dic2

build_vocab_dict('data/test_result.txt')  # count character frequencies and build the vocabulary

class Config(object):
    """Hyper-parameter container for the BiLSTM segmenter.

    Loads the pickled vocabulary, training sentences and BMES targets
    produced by the build_* functions above, and holds all training
    hyper-parameters.
    """
    def __init__(self):
        # Use context managers so the pickle files are closed after loading
        # (the original passed unclosed open() handles to pkl.load).
        with open('data/generate_pkl/vocab.pkl', 'rb') as f:
            self.vocab = pkl.load(f)        # char -> 1-based index vocabulary
        with open('data/generate_pkl/train_data.pkl', 'rb') as f:
            self.train_data = pkl.load(f)   # list of raw sentences
        with open('data/generate_pkl/target.pkl', 'rb') as f:
            self.target = pkl.load(f)       # list of BMES tag strings

        self.learning_rate = 0.0015  # Adam learning rate
        self.epoch = 9               # number of training epochs
        self.dropout = 0.6           # dropout rate
        self.max_len = 32            # pad/truncate sentences to this length

        self.output_size = 4         # output size; presumably the four BMES tags
        self.embed_dim = 128         # embedding dimension
        self.hidden_dim = 64         # LSTM hidden dimension
        self.hout1 = 32              # first layer output size
        self.hout2 = 64              # second layer output size

        self.num_layers = 2  # two stacked LSTM layers

        print("---------创建参数类完成---------")
        print("# 词表大小：", len(self.vocab))
        print("# 训练数据大小：", len(self.train_data))
        print("# 标签大小：", len(self.target))
        print("# 输出大小：", self.output_size)
        print("# 嵌入层大小：", self.embed_dim)
        print("# 隐藏层大小：", self.hidden_dim)
        print("# 第一层输出大小：", self.hout1)
        print("# 第二层输出大小：", self.hout2)
        print("# 学习率：", self.learning_rate)
        print("# epoch次数：", self.epoch)
        print("# dropout层：", self.dropout)
        print("# 每个LSTM中循环次数：", self.num_layers)
        print("-------------------------------")

# Fix the RNG seed so initialization is reproducible
torch.manual_seed(1)
config = Config()
voc_size = len(config.vocab)  # vocab indices start at 1; 0 is reserved for padding

# Convert each training sentence into a fixed-length tensor of vocab indices.
train_data_list = []
for lin in config.train_data:
    hang = []  # one sentence as a list of vocab indices
    for word in lin:
        hang.append(config.vocab[word])  # every char is in vocab (built from the same file)
    # Pad with 0 up to max_len when the sentence is shorter
    if len(hang) < config.max_len:
        hang.extend([0] * (config.max_len - len(hang)))
    # Otherwise truncate so the sentence is exactly max_len long
    else:
        hang = hang[:config.max_len]
    
    # Convert the index list into a long tensor
    train_data_list.append(torch.tensor(hang, dtype=torch.long))
print(train_data_list[:1])
print("数据行数", len(train_data_list))

from omod import BiLSTM_Model, target_dict

# Convert each BMES tag string into a fixed-length tensor of class indices
# via target_dict.  (The original comment said "one-hot", but this builds
# integer class indices — which is what CrossEntropyLoss below expects.)
target_list = []
for lin in config.target:
    hang = []
    for word in lin:
        hang.append(target_dict[word])  # tag char -> class index; mapping defined in omod
    # Pad with 0 up to max_len when the tag string is shorter
    # NOTE(review): padded positions get label 0 and are included in the loss
    if len(hang) < config.max_len:
        hang.extend([0] * (config.max_len - len(hang)))
    # Otherwise truncate to exactly max_len
    else:
        hang = hang[:config.max_len]
    target_list.append(torch.tensor(hang, dtype=torch.long))
print(target_list[:1])
print("目标行数", len(target_list))


# +1 because padding index 0 is not a key in the vocab dict
model = BiLSTM_Model(voc_size + 1, config.embed_dim, config.hidden_dim)
print(model)

# Loss and optimizer
loss_function = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)

# Use the GPU when available, otherwise the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
model.to(device)
train_data_list = [i.to(device) for i in train_data_list]
target_list = [i.to(device) for i in target_list]

# Training loop: one sentence per optimization step (batch size 1)
for epoch in range(config.epoch):
    print("Epoch:", epoch + 1)
    for i in tqdm(range(len(train_data_list))):
        model.zero_grad()  # equivalent to optimizer.zero_grad() here (optimizer holds all model params)
        input_data = train_data_list[i].view(1, -1)  # shape (1, max_len)
        target = target_list[i].view(-1)             # shape (max_len,)
        output = model(input_data)
        # NOTE(review): 5 classes hard-coded here while config.output_size is 4 —
        # presumably 4 BMES tags plus padding class 0; confirm against omod.target_dict
        loss = loss_function(output.view(-1, 5), target)
        loss.backward()
        optimizer.step()
    print("Loss:", loss.item())  # loss of the LAST sentence only, not an epoch average
torch.save(model.state_dict(), 'model/bilstm_model.pkl')