# Standard library
import csv
import os
import time

# Third-party
import torch

# Project-local (these star imports also supply names used below, e.g.
# jieba, filter_sent, Corpus, Seq2Seq)
from model import *
from pre_process import *

def _purify(txt):
    """Apply filter_sent to every raw line and return the cleaned list."""
    return list(map(filter_sent, txt))

# Tokenization mode: character-level (True) or jieba word segmentation (False).
isCharVec = False
# Drop QA pairs whose question or answer reaches this many tokens; -1 disables
# the length filter. Also forwarded to Corpus below.
maxSentenceWordsNum = 25

# Read the tab-separated QA corpus and train chunk by chunk so a very large
# file never has to be tokenized/held in memory all at once.
with open(r"./data/test.tsv", 'r', encoding='utf-8') as f:
    txts = _purify(f.readlines())
    maxTxtLength = 50000  # QA pairs per training chunk
    # range() stepping replaces the old num+1 counting loop, which produced an
    # empty final chunk (and a pointless training run) whenever len(txts) was
    # an exact multiple of maxTxtLength, or the file was empty.
    for countS in range(0, len(txts), maxTxtLength):
        countE = min(countS + maxTxtLength, len(txts))
        print(countS, '||', countE)
        txt = txts[countS:countE]
        time.sleep(1)  # brief pause between chunks (kept from original pacing)
        # Keep only lines that actually split into a question and an answer.
        data = [pair for pair in (line.split('\t') for line in txt)
                if len(pair) > 1]
        # Tokenize each pair: per character, or with jieba word segmentation.
        if isCharVec:
            data = [[list(pair[0]), list(pair[1])] for pair in data]
        else:
            data = [[jieba.lcut(pair[0]), jieba.lcut(pair[1])] for pair in data]
        # Report the pairs that exceed the length limit, then drop them.
        for q, a in data:
            if len(q) >= maxSentenceWordsNum or len(a) >= maxSentenceWordsNum:
                print(q, " || ", a)
        if maxSentenceWordsNum != -1:
            data = [pair for pair in data
                    if len(pair[0]) < maxSentenceWordsNum
                    and len(pair[1]) < maxSentenceWordsNum]

        # Resume the vocabulary from an existing checkpoint so every chunk
        # trains against a consistent word<->id mapping.
        address = r"model bi+L"
        ckptPath = "./" + address + ".pkl"
        if os.path.exists(ckptPath):
            checkpoint = torch.load(ckptPath)
            id2word = checkpoint['id2word']
            word2id = {word: idx for idx, word in enumerate(id2word)}
            wordNum = len(id2word)
        else:
            # First chunk: let Corpus build the vocabulary from scratch.
            id2word = None
            word2id = None
            wordNum = None

        # NOTE(review): Corpus and Seq2Seq come from the star imports at the
        # top of the file; their exact contracts live in the project modules.
        dataClass = Corpus(data, maxSentenceWordsNum=maxSentenceWordsNum,
                           id2word=id2word, word2id=word2id, wordNum=wordNum,
                           tfidf=True)

        # Bidirectional 5-layer encoder / 3-layer decoder, attention type 'L'
        # with the 'concat' scoring method.
        model = Seq2Seq(dataClass, featureSize=256, hiddenSize=256,
                        attnType='L', attnMethod='concat',
                        encoderNumLayers=5, decoderNumLayers=3,
                        encoderBidirectional=True
                        )
        # Train on this chunk, then checkpoint so the next chunk resumes it.
        model.train(batchSize=1024, epoch=600, address=address + '.pkl')
        model.save(address + '.pkl')

# NOTE(review): dead code disabled by wrapping it in a module-level
# triple-quoted string — an alternative training path that reads a CSV
# corpus (absolute Windows path) instead of the TSV file above. It is
# evaluated as a no-op string literal at import time. Kept verbatim below
# (string contents are runtime bytes, so they are not translated or
# reformatted); consider deleting it and relying on version-control history.
'''#读入csv文件
with open(r"D:\PyCharm\project\learningWearinessChatbot\data\ChatRobotCorpusAllData.csv", "r+", encoding='utf-8-sig', newline="") as f:
    alldata = csv.reader(f)
    chats = []
    for chat in alldata:
        chats.append(chat)
    maxnumPrebatch = 50000
    beginNum = 0
    endNum = beginNum + maxnumPrebatch
    for i in range(0, int(len(chats)/maxnumPrebatch)+1):
        data = []
        print(beginNum, " || ", endNum)
        for j in range(beginNum, endNum):
            d = []
            chat = chats[j]
            d.append(chat[0])
            d.append(chat[1])
            data.append(d)
        # 判断是否为字符，用jieba分词
        if isCharVec:
            data = [[[c for c in i[0]], [c for c in i[1]]] for i in data]
        else:
            data = [[jieba.lcut(i[0]), jieba.lcut(i[1])] for i in data]
        #输出超过长度的对话
        for i in data:
            if len(i[0]) >= maxSentenceWordsNum or len(i[1]) >= maxSentenceWordsNum:
                print(i[0], " || ", i[1])
        data = [i for i in data if (len(i[0]) < maxSentenceWordsNum and len(i[1]) < maxSentenceWordsNum) or maxSentenceWordsNum == -1]

        # 读入数据
        id2word = []
        address = r"chatRobotCorpus1"
        if os.path.exists("D:\PyCharm\project\learningWearinessChatbot\\" + address + ".pkl"):
            checkpoint = torch.load("D:\PyCharm\project\learningWearinessChatbot\\" + address + ".pkl")

            id2word = checkpoint['id2word']

            word2id = {i[1]: i[0] for i in enumerate(id2word)}
            wordNum = len(id2word)
        else:
            id2word = None
            word2id = None
            wordNum = None
            # 遍历所有内容

        dataClass = Corpus(data, maxSentenceWordsNum=25, id2word=id2word, word2id=word2id, wordNum=wordNum, tfidf=True)

        # 指定模型和一些超参
        model = Seq2Seq(dataClass, featureSize=256, hiddenSize=256,
                        attnType='L', attnMethod='concat',
                        encoderNumLayers=5, decoderNumLayers=3,
                        encoderBidirectional=True
                        )
        # 训练
        model.train(batchSize=1024, epoch=100, address=address + '.pkl')
        # 保存模型
        model.save(address + '.pkl')
        beginNum = beginNum + maxnumPrebatch
        endNum = endNum + maxnumPrebatch
        if endNum > len(chats):
            endNum = len(chats)'''