import os
import pandas as pd
from tqdm import tqdm
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torch
from pytorch_transformers import BertConfig, BertModel, BertTokenizer
from functools import reduce
from config import conf
from data_utils.basic_data_round2 import load_basic_dataset, load_train_val_dataset, load_train_val_dataset_cross
from results_process.regulizer import remove_short_entity
import jieba

# GPU device id read from the project configuration.
DEVICE_ID = conf.get('gpu', 'device_id')
# Custom marker tokens wrapped around each entity occurrence in the text.
# Deliberately distinct from BERT's [CLS]/[SEP]; they are added to the
# tokenizer vocabulary via add_tokens() in the loader factories below.
token_CLSE = '[CLES]'
token_SEPE = '[SEEP]'
token_CLS = '[CLS]'
token_SEP = '[SEP]'
# NOTE(review): ENTITY_NUM appears unused in this file — confirm before removing.
ENTITY_NUM = 8
import re

# Local path of the pretrained chinese-roberta-wwm-ext weights.
model_base = '/home/njuciairs/wangshuai/pretrained_models/chinese_roberta_wwm_ext_pytorch'
# Dedup: drop the shorter of two overlapping entity names
import numpy as np

# Dedup: drop the shorter of two overlapping entity names
import numpy as np


def remove_short_entity_by_long(entity_str):
    """Drop entity names that are strict substrings of a longer co-listed name.

    :param entity_str: ';'-separated entity names (non-str, e.g. NaN, passes through)
    :return: ';'-joined surviving names in their original order
    """
    if not isinstance(entity_str, str):
        return entity_str
    names = entity_str.split(';')
    kept = []
    for name in names:
        shadowed = False
        for other in names:
            # A shorter name fully contained in a longer one is redundant.
            if name in other and len(name) < len(other):
                print('removed %s by %s' % (name, other))
                shadowed = True
        if not shadowed:
            kept.append(name)
    return ';'.join(kept)


# When True, get_trans_map additionally records pairs where BOTH aliases
# appear in key_entity (joined with ';'). Disabled by default.
good_remove = False


def get_trans_map():
    """Learn an alias-canonicalization map from the training split.

    For every pair of overlapping entity names (one a substring of the other)
    that co-occur in a training row, map both 'a-b' and 'b-a' keys to the
    variant that actually appears in that row's key_entity column.

    :return: dict mapping 'nameA-nameB' -> canonical name
    """
    train_df = load_basic_dataset('train')
    entity_lists = train_df['entity'].map(lambda x: list(str(x).split(';')))
    key_lists = train_df['key_entity'].map(lambda x: list(str(x).split(';')))
    trans_map = {}
    both_existed_log = ''
    for row_entities, row_keys in zip(entity_lists, key_lists):
        for first in row_entities:
            if first == '':
                continue
            for second in row_entities:
                if second == '':
                    continue
                # Only overlapping, non-identical name pairs are of interest.
                if (first in second or second in first) and second != first:
                    if first in row_keys:
                        trans_map[first + '-' + second] = first
                        trans_map[second + '-' + first] = first
                    if second in row_keys:
                        trans_map[first + '-' + second] = second
                        trans_map[second + '-' + first] = second
                    if good_remove:
                        # Optional mode: keep both names when both are keys.
                        if first in row_keys and second in row_keys:
                            trans_map[first + '-' + second] = second + ';' + first
                            trans_map[second + '-' + first] = second + ';' + first
                            both_existed_log += second + ';' + first
                            print('both existsed:', second + ';' + first)
    return trans_map


def trans_keys(trans_map, entity_str):
    """Canonicalize a ';'-separated entity string using an alias map.

    :param trans_map: dict of 'nameA-nameB' -> canonical name (see get_trans_map)
    :param entity_str: ';'-separated entities; non-str input passes through
    :return: ';'-joined canonical names, or np.nan when nothing survives
    """
    if not isinstance(entity_str, str):
        return entity_str
    names = [n for n in entity_str.split(';') if str(n).strip() != '']
    canonical = set()
    for name in names:
        matched = False
        for other in names:
            key = name + '-' + other
            if key in trans_map and name != other:
                canonical.add(trans_map[key])
                matched = True
        if not matched:
            # No co-occurring alias known: keep the name as-is.
            canonical.add(name)
    if len(canonical) > 0:
        return ';'.join(list(canonical))
    return np.nan


# Built once at import time from the training set (module-level side effect);
# consumed by get_text_preprocessed below.
trans_map = get_trans_map()


def text_startwith_title(title, text, threshold):
    """Heuristically decide whether *text* already begins with *title*.

    Compares the character-set overlap between the title and the same-length
    prefix of the text.

    :param title: document title; a float (pandas NaN) or empty title counts as
                  "already present" so callers do not prepend anything
    :param text: document body
    :param threshold: minimum overlap ratio in (0, 1) to count as a match
    :return: True when the overlap ratio exceeds threshold
    """
    # Bug fix: the NaN check must run BEFORE str() conversion, otherwise
    # isinstance(title, float) is always False and NaN becomes the text 'nan'.
    if isinstance(title, float):
        return True
    title = str(title)
    text = str(text)
    if not title:
        # Bug fix: empty title previously divided by len(set('')) == 0.
        return True
    title_set = set(title)
    text_set = set(text[:len(title)])
    common = len(title_set & text_set) / len(title_set)
    return common > threshold


def get_text_preprocessed(title, text, entities, max_text_length, entity_start_char, entity_end_char):
    """Prepare one document for entity-pair classification.

    Prepends the title when the text does not already start with it,
    canonicalizes the entity string via the module-level trans_map, drops
    entities shadowed by longer ones, strips characters outside a whitelist,
    and wraps every merged entity occurrence with the marker characters.

    :param title: document title (converted with str())
    :param text: document body (converted with str())
    :param entities: ';'-separated candidate entity string
    :param max_text_length: truncate the text to this many characters first
    :param entity_start_char: marker inserted before each matched span
    :param entity_end_char: marker inserted after each matched span
    :return: (marked_text, pair_entities) — the wrapped entity strings appear
             in pair_entities in order of occurrence
    :raises Exception: when no entity is left to judge
    """
    title = str(title)
    text = str(text)
    entities = str(entities)
    # Only prepend the title if the text does not already start with it.
    if not text_startwith_title(title, text, 0.9):
        text = title + '\n' + text
    # Canonicalize aliases, then drop entities contained in longer ones.
    entities = trans_keys(trans_map, str(entities))
    entities = remove_short_entity_by_long(entities)
    text = text[:max_text_length]
    # Replace every character outside the whitelist (CJK range plus selected
    # punctuation) with a space so entity offsets stay aligned.
    pattern = re.compile(r"[^\u4e00-\u9fa5@?（）【】《》“”‘’#？()[];:；：。.、]")
    text = re.sub(pattern, ' ', text)
    # Collect (start, end) spans of every entity occurrence in the text.
    # NOTE(review): only ? * ( ) are escaped by hand; other regex
    # metacharacters in an entity name would still be interpreted as regex.
    # re.escape would be safer but changes behavior for some inputs — confirm.
    index_pairs = []
    for entity in entities.split(';'):
        matched_pair = [sub.span() for sub in
                        re.finditer(
                            entity.replace(r'?', r'\?').replace(r'*', r'\*').replace(r'(', r'\(').replace(r')', r'\)'),
                            text)]
        index_pairs += matched_pair
    index_pairs.sort()
    maxrange_index_pairs = []
    if len(index_pairs) == 0:
        # No entity occurs in the (truncated, cleaned) text: prepend each
        # entity, wrapped in markers, so the model still sees every candidate.
        pair_entities = []
        for entity in entities.split(';'):
            text = entity_start_char+entity+entity_end_char+text
            pair_entities.append(entity)
        if len(pair_entities)==0:
            raise Exception('没有实体可以判断')
        return text, pair_entities
    # Merge nested/contained spans into maximal ranges.
    start, end = index_pairs[0]
    for pair in index_pairs:
        start1, end1 = pair
        if end1 <= end:
            start = min(start, start1)
        else:
            maxrange_index_pairs.append((start, end))
            start = start1
            end = end1
    # NOTE(review): this tail step assigns start1 (not start) and appends
    # (start1, end), which is inconsistent with the loop above — confirm the
    # final merged span is the intended one.
    if end1 <= end:
        start1 = min(start, start1)
    maxrange_index_pairs.append((start1, end))
    # Spans still overlapping their predecessor are conflicts and are skipped
    # when inserting markers below.
    conflict_pairs = []
    for i in range(len(maxrange_index_pairs) - 1):
        start1, end1 = maxrange_index_pairs[i]
        start2, end2 = maxrange_index_pairs[i + 1]
        if start2 < end1:
            conflict_pairs.append((start2, end2))

    # Insert marker characters around each non-conflicting span, shifting
    # later offsets by the number of characters already inserted.
    off = 0
    text = list(text)
    pair_entities = []
    for pair in maxrange_index_pairs:
        if pair not in conflict_pairs:
            text.insert(pair[0] + off, entity_start_char)
            off += 1
            entity = ''.join(text[pair[0] + off:pair[1] + off])
            pair_entities.append(entity)
            text.insert(pair[1] + off, entity_end_char)
            off += 1
    print(conflict_pairs)
    return ''.join(text), pair_entities


class TextEntitySample():
    """One training sample: marked text, per-entity labels, sentiment label.

    Attributes:
        predict_text: text with entity spans wrapped in marker tokens
        id: sample id
        senti_label: document-level negative-sentiment label
        pair_labels: one bool per wrapped entity — is it a key entity?
        entity_pairs: the wrapped entity strings, in order of appearance
    """

    def __init__(self, id, init_text, entities, title, senti_label, key_entity, max_len):
        # Reserve ~5 characters per candidate entity so the text plus the
        # inserted marker tokens still fits within max_len.
        miner_length = len(entities.split(';'))
        # Fix: removed leftover per-sample debug print(id) that polluted
        # stdout on every dataset construction.
        text, entity_pairs = get_text_preprocessed(title, init_text, entities,
                                                   max_text_length=max_len - miner_length * 5,
                                                   entity_start_char=token_CLSE,
                                                   entity_end_char=token_SEPE)
        self.predict_text = text
        self.id = id
        self.senti_label = senti_label
        keys = str(key_entity).split(';')
        self.pair_labels = [(e in keys) for e in entity_pairs]
        self.entity_pairs = entity_pairs


class TestTextEntitySample():
    """Inference-time sample: marked text plus the entity pairs to classify."""

    def __init__(self, id, text, entities, title, max_len):
        entities = str(entities)
        # Reserve ~5 characters per candidate entity for the marker tokens.
        reserve = len(entities.split(';')) * 5
        marked_text, entity_pairs = get_text_preprocessed(
            title, text, entities,
            max_text_length=max_len - reserve,
            entity_start_char=token_CLSE,
            entity_end_char=token_SEPE)
        self.predict_text = marked_text
        self.id = id
        self.entity_pairs = entity_pairs


class EntityDataset(Dataset):
    """Training dataset: one TextEntitySample per labeled dataframe row."""

    def __init__(self, df, max_len=300):
        self.samples = [sample for sample in self.make_samples(df, max_len)]
        self.len = len(self.samples)

    def make_samples(self, df, max_len):
        # Yield samples lazily; __init__ materializes them into a list.
        columns = ['id', 'negative', 'text', 'title', 'entity', 'key_entity']
        for sample_id, negative, text, title, entity, key_entity in df[columns].values:
            yield TextEntitySample(sample_id, text, entity, title, negative, key_entity, max_len)

    def __getitem__(self, index):
        return self.samples[index]

    def __len__(self):
        return self.len


class TestEntityDataset(Dataset):
    """Inference dataset: one TestTextEntitySample per unlabeled row."""

    def __init__(self, df, max_len=300):
        self.samples = [sample for sample in self.make_samples(df, max_len)]
        self.len = len(self.samples)

    def make_samples(self, df, max_len):
        # Yield samples lazily; __init__ materializes them into a list.
        for sample_id, text, title, entity in df[['id', 'text', 'title', 'entity']].values:
            yield TestTextEntitySample(sample_id, text, entity, title, max_len)

    def __getitem__(self, index):
        return self.samples[index]

    def __len__(self):
        return self.len


def convert_texts_to_features(texts, tokenizer, max_len):
    """Tokenize texts into fixed-length id tensors with attention masks.

    :param texts: iterable of strings
    :param tokenizer: object with .encode(str) -> list[int] and .pad_token_id
    :param max_len: output sequence length (ids truncated / right-padded)
    :return: (input_ids, attention_mask) LongTensors of shape (batch, max_len)
    """
    # Pre-truncate the raw text slightly beyond max_len to bound encode() cost.
    encoded = [tokenizer.encode(t[:max_len + 100])[:max_len] for t in texts]
    pad_id = tokenizer.pad_token_id
    id_rows = []
    mask_rows = []
    for ids in encoded:
        n = len(ids)
        id_rows.append(ids + [pad_id] * (max_len - n))
        mask_rows.append([1] * n + [0] * (max_len - n))
    return torch.LongTensor(id_rows), torch.LongTensor(mask_rows)


def convert_batch(batch_samples, tokenizer, device, max_len=300):
    """Collate training samples into model-ready tensors.

    :return: (text_ids, entity_labels, sentiment_labels, attn_mask), all on device;
             entity_labels is the flattened concatenation of each sample's pair_labels.
    """
    batch_texts = [sample.predict_text for sample in batch_samples]
    text_ids, attn_mask = convert_texts_to_features(batch_texts, tokenizer, max_len)
    text_ids = text_ids.to(device)
    attn_mask = attn_mask.to(device)

    # One flat label vector across the whole batch, matching the order of
    # marker tokens in the concatenated texts.
    flat_labels = [label for sample in batch_samples for label in sample.pair_labels]
    entity_labels = torch.LongTensor(flat_labels).to(device)

    sentiment_labels = torch.LongTensor([sample.senti_label for sample in batch_samples]).to(device)
    return text_ids, entity_labels, sentiment_labels, attn_mask


def convert_test_batch(batch_samples, tokenizer, device, max_len):
    """Collate inference samples into (text_ids, attn_mask, ids, entity_pairs)."""
    batch_texts = [sample.predict_text for sample in batch_samples]
    text_ids, attn_mask = convert_texts_to_features(batch_texts, tokenizer, max_len)
    sample_ids = [sample.id for sample in batch_samples]
    pairs = [sample.entity_pairs for sample in batch_samples]
    return text_ids.to(device), attn_mask.to(device), sample_ids, pairs


def get_train_val_data_loader(device, batch_size, shuffle, maxlen=300):
    """Build train/val DataLoaders on an 80/20 split plus the shared tokenizer.

    :return: (train_dataloader, val_dataloader, tokenizer)
    """
    train_df, val_df = load_train_val_dataset(split_ratio=0.8)
    # Rows without candidate entities cannot form samples.
    train_df = train_df.dropna(subset=['entity'])
    val_df = val_df.dropna(subset=['entity'])
    train_dataset = EntityDataset(train_df, max_len=maxlen)
    val_dataset = EntityDataset(val_df, max_len=maxlen)

    tokenizer = BertTokenizer.from_pretrained(model_base)
    # Register the entity marker tokens with the vocabulary.
    tokenizer.add_tokens([token_CLSE, token_SEPE])

    def collate(batch_samples):
        return convert_batch(batch_samples, tokenizer, device, maxlen)

    train_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size,
                                  shuffle=shuffle, collate_fn=collate)
    val_dataloader = DataLoader(dataset=val_dataset, batch_size=batch_size,
                                shuffle=shuffle, collate_fn=collate)
    return train_dataloader, val_dataloader, tokenizer


def get_test_loader(device, batch_size, maxlen=300):
    """Build an unshuffled DataLoader over the test split."""
    test_df = load_basic_dataset(split='test')
    test_dataset = TestEntityDataset(test_df, maxlen)
    tokenizer = BertTokenizer.from_pretrained(model_base)
    # Register the entity marker tokens with the vocabulary.
    tokenizer.add_tokens([token_CLSE, token_SEPE])

    def collate(batch_samples):
        return convert_test_batch(batch_samples, tokenizer, device, maxlen)

    return DataLoader(dataset=test_dataset, batch_size=batch_size,
                      shuffle=False, collate_fn=collate)


def get_test_loader_by_split_id(device, batch_size, test_number, cross_number=9, maxlen=300):
    """Build an unshuffled DataLoader over one cross-validation fold.

    The fold's validation slice is treated as the test set; the training
    slice returned by the project loader is not used here.
    """
    fold_val_df, _fold_train_df = load_train_val_dataset_cross(test_number, cross_number, tmpdir='tmp_round2_full')

    test_dataset = TestEntityDataset(fold_val_df, maxlen)
    tokenizer = BertTokenizer.from_pretrained(model_base)
    # Register the entity marker tokens with the vocabulary.
    tokenizer.add_tokens([token_CLSE, token_SEPE])

    def collate(batch_samples):
        return convert_test_batch(batch_samples, tokenizer, device, maxlen)

    return DataLoader(dataset=test_dataset, batch_size=batch_size,
                      shuffle=False, collate_fn=collate)


def get_test_loader_by_df(test_df, device, batch_size, maxlen=300):
    """Build an unshuffled DataLoader over a caller-supplied dataframe."""
    test_dataset = TestEntityDataset(test_df, maxlen)
    tokenizer = BertTokenizer.from_pretrained(model_base)
    # Register the entity marker tokens with the vocabulary.
    tokenizer.add_tokens([token_CLSE, token_SEPE])

    def collate(batch_samples):
        return convert_test_batch(batch_samples, tokenizer, device, maxlen)

    return DataLoader(dataset=test_dataset, batch_size=batch_size,
                      shuffle=False, collate_fn=collate)


def get_train_val_data_loader_cross(device, batch_size, shuffle, test_number, cross_number=9, maxlen=300):
    """Build train/val DataLoaders for one cross-validation fold.

    :param test_number: index of the held-out fold
    :param cross_number: total number of folds
    :return: (train_dataloader, val_dataloader, tokenizer)
    """
    val_df, train_df = load_train_val_dataset_cross(test_number, cross_number)
    # Rows without candidate entities cannot form samples.
    # Fix: the original applied dropna to train_df twice; once is enough.
    train_df = train_df.dropna(subset=['entity'])
    val_df = val_df.dropna(subset=['entity'])
    train_dataset = EntityDataset(train_df, max_len=maxlen)
    val_dataset = EntityDataset(val_df, max_len=maxlen)

    tokenizer = BertTokenizer.from_pretrained(model_base)
    # Register the entity marker tokens with the vocabulary.
    tokenizer.add_tokens([token_CLSE, token_SEPE])

    train_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=shuffle,
                                  collate_fn=lambda batch_samples: convert_batch(batch_samples, tokenizer, device,
                                                                                 maxlen))
    val_dataloader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=shuffle,
                                collate_fn=lambda batch_samples: convert_batch(batch_samples, tokenizer, device,
                                                                               maxlen))
    return train_dataloader, val_dataloader, tokenizer


if __name__ == '__main__':
    # Smoke test: build the loaders, fetch one batch, and check that the
    # number of CLSE marker tokens in the batch equals the number of
    # flattened entity labels.
    device = torch.device("cuda:%s" % (DEVICE_ID) if torch.cuda.is_available() else "cpu")
    train_dataloader, val_dataloader, tokenizer = get_train_val_data_loader(device, 8, True)
    for batch in tqdm(train_dataloader):
        text_ids, entity_labels, sentiment_labels, attn_mask = batch
        break
    # Vocabulary id of the entity-start marker after add_tokens().
    CLSE_ID = tokenizer.convert_tokens_to_ids([token_CLSE])[0]
    pos = text_ids == CLSE_ID

    # These two counts should match if marker insertion is consistent.
    print(torch.sum(pos), len(entity_labels))
    print([tokenizer.convert_ids_to_tokens(texti.tolist()) for texti in text_ids])
    print(pos)
    # print(batch)
