import os
import random
import time
from datetime import timedelta

import torch
from tqdm import tqdm

from Bert_RCNN_Pytorch.models import bert_RCNN as bert_RCNN

PAD, CLS = '[PAD]', '[CLS]'  # padding token; [CLS] is BERT's sequence-summary token

# 读取数据集
def load_dataset(config, path, pad_size=32):
    contents = []
    with open(path, 'r', encoding='UTF-8') as f:
        # tqdm添加进度条
        for line in tqdm(f):
            # strip()去除回车
            lin = line.strip()
            if not lin:
                continue
            if len(lin.split('\t')) != 2:
                continue
            [content, label] = lin.split('\t')
            # convert_ids_to_tokens调用此方法
            token = config.tokenizer.tokenize(content)
            token = [CLS] + token

            seq_len = len(token)
            mask = []
            token_ids = config.tokenizer.convert_tokens_to_ids(token)

            if pad_size:
                if len(token) < pad_size:
                    mask = [1] * len(token_ids) + [0] * (pad_size - len(token))
                    token_ids += ([0] * (pad_size - len(token)))
                else:
                    mask = [1] * pad_size
                    token_ids = token_ids[:pad_size]
                    seq_len = pad_size
            contents.append((token_ids, int(label), seq_len, mask))
    return contents


# 创建数据集
# Build all three dataset splits declared in the config.
def build_dataset(config):
    """Return ``(train, dev, test)`` sample lists encoded by ``load_dataset``."""
    paths = (config.train_path, config.dev_path, config.test_path)
    train, dev, test = (load_dataset(config, p, config.pad_size) for p in paths)
    return train, dev, test


class DatasetIterater(object):
    """Batched iterator over encoded samples, yielding device tensors.

    Samples are ``(token_ids, label, seq_len, mask)`` tuples as produced
    by ``load_dataset``; each batch is returned as ``((x, seq_len, mask), y)``
    LongTensors moved to *device*.  The iterator resets itself after a
    full pass, so it can be reused across epochs.
    """

    def __init__(self, batches, batch_size, device):
        self.batch_size = batch_size
        self.batches = batches
        self.n_batches = len(batches) // batch_size
        # True when a final, smaller batch remains.
        # BUG FIX: was `len(batches) % self.n_batches != 0`, which divides
        # by zero when the dataset is smaller than one batch and
        # mis-detects the remainder (e.g. 12 samples with batch_size 5
        # gave 12 % 2 == 0, silently dropping 2 samples).
        self.residue = len(batches) % batch_size != 0
        self.index = 0
        self.device = device

    def _to_tensor(self, datas):
        """Stack a list of samples into LongTensors on ``self.device``."""
        x = torch.LongTensor([_[0] for _ in datas]).to(self.device)
        y = torch.LongTensor([_[1] for _ in datas]).to(self.device)

        # length before padding (values above pad_size were capped to pad_size)
        seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)
        mask = torch.LongTensor([_[3] for _ in datas]).to(self.device)
        return (x, seq_len, mask), y

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            # final, shorter batch
            batches = self.batches[self.index * self.batch_size:]
            self.index += 1
            return self._to_tensor(batches)

        elif self.index >= self.n_batches:
            self.index = 0  # reset so the iterator can be reused
            raise StopIteration
        else:
            batches = self.batches[self.index * self.batch_size:
                                   (self.index + 1) * self.batch_size]
            self.index += 1
            return self._to_tensor(batches)

    def __iter__(self):
        return self

    def __len__(self):
        return self.n_batches + 1 if self.residue else self.n_batches


def build_iterator(dataset, config):
    """Wrap *dataset* in a DatasetIterater using the configured batch size and device."""
    return DatasetIterater(dataset, config.batch_size, config.device)

"""
    获取已使用时间
"""
def get_time_dif(start_time):
    end_time = time.time()
    time_dif = end_time - start_time
    return timedelta(seconds=int(round(time_dif)))

def divideDataSet(path, outputLocation="F:\\graduationDesign\\BERT\\Bert_RCNN_Pytorch\\dataSet\\data"):
    """Randomly split a dataset file into train/test/valid at an 8:1:1 ratio.

    Writes ``train.txt``, ``test.txt`` and ``valid.txt`` into
    *outputLocation*.  The destination directory is now a parameter; the
    original hard-coded path is kept as the default for backward
    compatibility.

    Args:
        path: source text file, one sample per line.
        outputLocation: directory that receives the three split files.
    """
    with open(path, 'r', encoding='utf8') as f:
        lines = f.readlines()
    n = len(lines)
    # shuffle the line indices, then partition them 8:1:1
    line_ids = list(range(n))
    random.shuffle(line_ids)
    # sets give O(1) membership tests instead of an O(n) list scan per line
    train_ids = set(line_ids[:int(n * 0.8)])
    test_ids = set(line_ids[int(n * 0.8):int(n * 0.9)])
    # everything not in train_ids/test_ids falls into valid
    with open(os.path.join(outputLocation, 'train.txt'), mode='w+', encoding="utf-8") as train, \
            open(os.path.join(outputLocation, 'test.txt'), mode='w+', encoding="utf-8") as test, \
            open(os.path.join(outputLocation, 'valid.txt'), mode='w+', encoding="utf-8") as valid:
        for i, line in enumerate(lines):
            if i in train_ids:
                train.write(line)
            elif i in test_ids:
                test.write(line)
            else:
                valid.write(line)
    # the `with` block closes all three files; no explicit close() needed


# divideDataSet("F:\\graduationDesign\\dataSet\\Chinese_Rumor_Dataset-master\\CED_Dataset\\enrichData.txt")

'''
    打乱文本
'''
def shuffleTxt(sourceTxt, target):
    out = open(target, mode='w+', encoding="utf-8")
    lines = []
    with open(sourceTxt, mode='r', encoding="utf-8") as infile:
        for line in infile:
            lines.append(line)
        random.shuffle(lines)
        for line in lines:
            out.write(line)
    infile.close()

# shuffleTxt(sourceTxt='F:\\graduationDesign\\dataSet\\Chinese_Rumor_Dataset-master\\CED_Dataset\\output2.txt',
#            target='F:\\graduationDesign\\dataSet\\Chinese_Rumor_Dataset-master\\CED_Dataset\\enrichData.txt')


# 读取模型，判断是否是谣言
def judgeRumor(config, text):
    model = bert_RCNN.Model(config)
    model.load_state_dict(torch.load(config.save_path))
    model.eval()
    encode_text = encodeText(config, text, config.pad_size)
    out = model(encode_text)
    # 输出谣言的概率
    m = torch.nn.Softmax()
    out = m(out)
    # 获取每行的最大值  先转成CPU float-tensor再转成numpy
    probability = torch.max(out.data, 1)[0].cpu().numpy()[0]
    isRumor = torch.max(out.data, 1)[1].cpu().numpy()[0]
    # print(isRumor)
    # print(probability)
    # print(model.save_out)
    # print(model.save_out.size())
    return model.save_out



def encodeText(config, content, pad_size=32):
    """Encode one text into the ``(token_ids, seq_len, mask)`` tensors the model expects.

    HACK: in addition to the encoded *content*, a second, hard-coded
    sample (the literal id/mask lists below, seq_len 80) is appended, so
    every returned tensor has batch dimension 2.  Callers such as
    judgeRumor index row 0 for the real text; the extra row looks like a
    debugging leftover — NOTE(review): confirm before removing it.
    """
    # tokenize the text, then prepend BERT's [CLS] summary token
    token = config.tokenizer.tokenize(content)
    token = [CLS] + token

    seq_len = len(token)
    mask = []
    token_ids = config.tokenizer.convert_tokens_to_ids(token)
    if pad_size:
        if len(token) < pad_size:
            # 1 marks real tokens, 0 marks padding
            mask = [1] * len(token_ids) + [0] * (pad_size - len(token))
            token_ids += ([0] * (pad_size - len(token)))
        else:
            mask = [1] * pad_size
            token_ids = token_ids[:pad_size]
            seq_len = pad_size

    # build 2-D (batch) lists: the real sample plus the hard-coded one
     # tokenIds
    tokenIds = []
    tokenIds.append(token_ids)
    tokenIds.append([ 101,  137, 1062, 2128, 6956, 1036, 4997, 1927, 6679,  928, 2622, 5165,
         2593, 1355, 2357, 2398, 1378, 2593, 2823, 2111, 2094, 8024, 3724, 6760,
         2141, 7741, 2207, 2110, 2192,  782, 1423,  752, 9081, 8471, 8544, 8952,
         9446, 2376, 2564, 2810, 3141, 8024,  791, 1921,  677, 1286,  671,  702,
          676, 2259, 1914, 2207, 1957, 2111, 1762, 7239, 5323, 5709, 1736, 2207,
         1277, 7353, 6818, 6158,  782, 2866, 6624,  749, 8024, 2207, 1957, 2111,
         5543, 6432, 1139, 1961, 4268, 4268, 4638, 2797])
     # seqLen
    seqLen = []
    seqLen.append(seq_len)
    seqLen.append(80)
     # Mask
    Mask = []
    Mask.append(mask)
    Mask.append([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
         1, 1, 1, 1, 1, 1, 1, 1])

    # convert the batch lists to tensors
    token_ids = torch.tensor(tokenIds)
    seqLen = torch.tensor(seqLen)
    mask = torch.tensor(Mask)

    return (token_ids, seqLen, mask)

if __name__ == '__main__':
    # Smoke test: load the trained model and run rumor detection on a sample text.
    dataset = 'dataSet'  # dataset root directory expected by Config
    config = bert_RCNN.Config(dataset)
    # sample texts: one known rumor, one known non-rumor
    text_rumor = '车子已经清出了 滨海至上海班的2188次汽车 死亡人数38人 真的是特大事故了 令人触目惊心 潸然泪下[流泪][流泪]……我不知道该说什么了 只觉得阵阵莫名的心痛……死者长已矣 生者如斯夫愿已故的38位乘客一路走好【天堂没有悲剧……】亦衷心地祝愿活着的人勇敢面对伤痛 坚强地活下去'
    text_non_rumor = '【广州大学城吃货注意！明天番禺美食节！离大学城超近！】明天开始，广州美食节连续七天，门票免费！有来自全世界的600多种小吃。12月23日～30日上午10点～晚上9点，地点：番禺大道南大路口中华美食广场。等候各方吃货！交通路线猛戳《美食节攻略》http://t.cn/zj0pC1F via@广东大学生头条新闻'
    # text = '@公安部儿童失踪信息紧急发布平台 急找孩子，求转 实验小学 寻人启事 13930886687 帮忙扩散，今天上午一个三岁多小女孩在锦绣花园小区附近被人拐走了，小女孩能说出她爸爸的手机号码 从监控上看是被一四十多岁男人抱走了现大人都急疯了 联系人 张静杰13930886687'
    judgeRumor(config, text_non_rumor)