from bert4keras.snippets import text_segmentate
import os
import json
import glob
import re
import numpy as np

# Separator characters handed to bert4keras's text_segmentate when cutting
# long texts (newline, CJK and ASCII sentence punctuation, comma, space).
seps = u'\n。！？!?；;，, '
# Characters stripped from the ends of each resulting segment.
strips = u'；;，, '

def load_data(filenames, maxlen=512):
    """Load tab-separated ``text<TAB>label`` lines as (segment, int label) pairs.

    Each text is split with ``text_segmentate`` into pieces of at most
    ``maxlen - 2`` characters (leaving room for the [CLS]/[SEP] tokens),
    and every piece is paired with the line's integer label.
    """
    samples = []
    with open(filenames, encoding='utf-8') as reader:
        for row in reader:
            text, label = row.strip().split('\t')
            label = int(label)
            segments = text_segmentate(text, maxlen - 2, seps, strips)
            samples.extend((segment, label) for segment in segments)
    return samples



class SentimentLoader():
    """Loader for the sentiment classification dataset.

    Resolves the train/valid/test file paths at construction time;
    the actual reading is delegated to :func:`load_data`.
    """

    def __init__(self, data_path=r'F:\nlp_data\classify\sentiment', maxlen=512):
        self.maxlen = maxlen
        self.train_path = os.path.join(data_path, 'sentiment.train.data')
        self.valid_path = os.path.join(data_path, 'sentiment.valid.data')
        self.test_path = os.path.join(data_path, 'sentiment.test.data')

    def get_train_data(self):
        """(segment, label) pairs from the training split."""
        return load_data(self.train_path, self.maxlen)

    def get_valid_data(self):
        """(segment, label) pairs from the validation split."""
        return load_data(self.valid_path, self.maxlen)

    def get_test_data(self):
        """(segment, label) pairs from the test split."""
        return load_data(self.test_path, self.maxlen)



def load_lcqmc_data(filenames, maxlen=512):
    """Load tab-separated sentence pairs as (text1, text2, int label) tuples.

    Each sentence is segmented with ``text_segmentate`` and only the
    FIRST segment (at most ``maxlen - 2`` characters) is kept.
    """
    pairs = []
    with open(filenames, encoding='utf-8') as reader:
        for row in reader:
            first, second, label = row.strip().split('\t')
            first = text_segmentate(first, maxlen - 2, seps, strips)[0]
            second = text_segmentate(second, maxlen - 2, seps, strips)[0]
            pairs.append((first, second, int(label)))
    return pairs


class LCQMCLoader():
    """Loader for the LCQMC sentence-pair matching dataset.

    NOTE(review): the default ``data_path`` points at the *iflytek*
    directory — this looks like a copy-paste leftover from
    ``IflytekDataLoader``; confirm the intended default location.
    """

    def __init__(self, data_path=r'F:\nlp_data\classify\iflytek', maxlen=512):
        self.maxlen = maxlen
        self.train_path = os.path.join(data_path, 'train.txt')
        self.valid_path = os.path.join(data_path, 'dev.txt')
        self.test_path = os.path.join(data_path, 'test.txt')

    def get_train_data(self):
        """(text1, text2, label) tuples from the training split."""
        return load_lcqmc_data(self.train_path, self.maxlen)

    def get_valid_data(self):
        """(text1, text2, label) tuples from the validation split."""
        return load_lcqmc_data(self.valid_path, self.maxlen)

    def get_test_data(self):
        """(text1, text2, label) tuples from the test split."""
        return load_lcqmc_data(self.test_path, self.maxlen)


class IflytekDataLoader():
    """Loader for the iflytek classification dataset (JSON-lines files)."""

    def __init__(self, data_path=r'F:\nlp_data\classify\iflytek'):
        self.train_path = os.path.join(data_path, 'train.json')
        self.valid_path = os.path.join(data_path, 'dev.json')
        self.test_path = os.path.join(data_path, 'test.json')
        self.label_path = os.path.join(data_path, 'labels.json')

    def get_train_data(self):
        """(sentence, label) pairs from the training split."""
        return self.load_data(self.train_path)

    def get_valid_data(self):
        """(sentence, label) pairs from the validation split."""
        return self.load_data(self.valid_path)

    def get_test_data(self):
        """(sentence, label) pairs from the test split."""
        return self.load_data(self.test_path)

    def load_data(self, filename):
        """Parse one JSON-lines file into (sentence, int label) tuples."""
        samples = []
        with open(filename, encoding='utf-8') as reader:
            for row in reader:
                record = json.loads(row)
                samples.append((record['sentence'], int(record['label'])))
        return samples


class JinyongDataLoader():
    """Jin Yong novel corpus loader for text generation.

    Reads every ``*.txt`` under ``data_path``, strips boilerplate and
    cuts each cleaned line into full-stop-terminated sentences no longer
    than ``maxlen - 2`` characters. Returns one sentence list per line.
    """

    def __init__(self, data_path=r'F:\nlp_data\textgenerate\金庸作品集', maxlen=512):
        self.data_path = data_path
        self.maxlen = maxlen

    def get_train_data(self):
        """Return a list of sentence lists, one per non-empty source line."""
        limit = self.maxlen - 2
        corpus = []
        for path in glob.glob(os.path.join(self.data_path, '*.txt')):
            with open(path, encoding='utf-8') as reader:
                for raw in reader:
                    # Drop line endings, scrubbing notices, URLs and
                    # ideographic spaces before segmenting.
                    cleaned = raw.replace("\r", "").replace("\n", "")
                    cleaned = cleaned.replace(u'整理制作，并提供下载', "")
                    cleaned = re.sub(u'www.*?com', '', cleaned)
                    cleaned = cleaned.replace(u'\u3000', ' ')
                    if cleaned == "":
                        continue
                    # Split on double spaces, then on full stops; keep
                    # only sentences that fit within maxlen - 2 chars.
                    sentences = [
                        s
                        for chunk in cleaned.split('  ')
                        for s in re.findall(u'.*?。', chunk)
                        if len(s) <= limit
                    ]
                    corpus.append(sentences)
        return corpus


class QADataLoader():
    '''
    QA dataset loader for the SogouQA and WebQA datasets.

    SogouQA is shuffled and split 2/3 train, 1/3 valid; the train part
    is then duplicated and WebQA appended, mixing SogouQA and WebQA at
    a 2:1 ratio (as the original comment states).
    '''
    def __init__(self, data_path=r'E:\nlp_data\qa', maxlen=512):
        self.data_path = data_path
        self.maxlen = maxlen

        sogou_data_path = os.path.join(data_path, 'SogouQA.json')
        webqa_data_path = os.path.join(data_path, 'WebQA.json')

        # Fix: the two loads were swapped (webqa_data was read from the
        # Sogou file and vice versa). Also close the files deterministically.
        with open(sogou_data_path, encoding='utf-8') as f:
            sogou_data = json.load(f)
        with open(webqa_data_path, encoding='utf-8') as f:
            webqa_data = json.load(f)

        # Fix: shuffle indices over the dataset length, not the length of
        # the path *string* (len(sogou_data_path) in the original).
        random_order = list(range(len(sogou_data)))
        np.random.shuffle(random_order)

        # 2/3 of SogouQA for training, 1/3 for validation.
        self.train_data = [sogou_data[j] for i, j in enumerate(random_order) if i % 3 != 0]
        self.valid_data = [sogou_data[j] for i, j in enumerate(random_order) if i % 3 == 0]
        self.train_data.extend(self.train_data)
        self.train_data.extend(webqa_data)  # 将SogouQA和WebQA按2:1的比例混合

    def get_train_data(self):
        """Mixed SogouQA (doubled) + WebQA training samples."""
        return self.train_data

    def get_valid_data(self):
        """SogouQA validation samples (every third shuffled item)."""
        return self.valid_data


class UbuntuCornellDataLoader():
    '''
    Cornell Movie-Dialogs corpus loader producing (question, answer)
    utterance pairs for dialogue-system training.
    '''

    def __init__(self, data_path=r'F:\nlp_data\chat\cornell_movie_dialogs_corpus\cornell movie-dialogs corpus', maxlen=512):
        self.data_path = data_path
        self.maxlen = maxlen

    def clean_text(self, txt):
        """Lowercase, expand common English contractions and strip punctuation."""
        txt = txt.lower()
        txt = re.sub(r"i'm", "i am", txt)
        txt = re.sub(r"he's", "he is", txt)
        txt = re.sub(r"she's", "she is", txt)
        txt = re.sub(r"that's", "that is", txt)
        txt = re.sub(r"what's", "what is", txt)
        txt = re.sub(r"where's", "where is", txt)
        txt = re.sub(r"\'ll", " will", txt)
        txt = re.sub(r"\'ve", " have", txt)
        txt = re.sub(r"\'re", " are", txt)
        txt = re.sub(r"\'d", " would", txt)
        txt = re.sub(r"won't", "will not", txt)
        txt = re.sub(r"can't", "can not", txt)
        txt = re.sub(r"[^\w\s]", "", txt)  # drop everything but word chars/whitespace
        return txt

    def get_train_data(self):
        """Return cleaned (question, answer) pairs from the corpus.

        Fixes over the original: file handles are closed via ``with``
        (they were leaked), and the ``del`` statements referencing loop
        variables were removed — they raised NameError whenever a loop
        body never executed (e.g. empty/short input files).
        """
        # Fix: close the files deterministically.
        with open(os.path.join(self.data_path, 'movie_lines.txt'),
                  encoding='utf-8', errors='ignore') as f:
            lines = f.read().split('\n')
        with open(os.path.join(self.data_path, 'movie_conversations.txt'),
                  encoding='utf-8', errors='ignore') as f:
            convers = f.read().split('\n')

        # Each conversation row ends with a python-ish id list such as
        # "['L1', 'L2']"; strip brackets, quotes and commas to get the ids.
        exchn = []
        for conver in convers:
            exchn.append(conver.split(' +++$+++ ')[-1][1:-1].replace("'", " ").replace(",", "").split())

        # line id -> utterance text
        diag = {}
        for line in lines:
            parts = line.split(' +++$+++ ')
            diag[parts[0]] = parts[-1]

        # Consecutive utterances in a conversation form (question, answer) pairs.
        questions = []
        answers = []
        for conver in exchn:
            for i in range(len(conver) - 1):
                questions.append(diag[conver[i]])
                answers.append(diag[conver[i + 1]])

        # Keep only pairs whose raw question is shorter than maxlen characters
        # (answers are token-truncated below instead).
        sorted_ques = []
        sorted_ans = []
        for q, a in zip(questions, answers):
            if len(q) < self.maxlen:
                sorted_ques.append(q)
                sorted_ans.append(a)

        clean_ques = [self.clean_text(q) for q in sorted_ques]
        clean_ans = [self.clean_text(a) for a in sorted_ans]

        # Truncate answers to maxlen - 2 tokens (room for [CLS]/[SEP]).
        clean_ans = [' '.join(a.split()[:self.maxlen - 2]) for a in clean_ans]

        # NOTE(review): keeps only the FIRST 3 pairs — this looks like a
        # debugging leftover; confirm the intended trim size before using
        # this loader for real training. Behavior preserved as-is.
        clean_ans = clean_ans[:3]
        clean_ques = clean_ques[:3]

        return [(q, a) for q, a in zip(clean_ques, clean_ans)]

